filename | text
---|---|
the-stack_0_26870
|
import pandas as pd
import glob
from dotenv import load_dotenv
import os, sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
from db import connect_to_db
# USAGE: $ python populate_price_table.py <PATH_TO_FILES>
args = sys.argv[1:]
path = args[0]
# Create database connection
engine = connect_to_db()
# Loop through all the timeframe directories,
# skipping '.DS_Store' in case the script is running on a Mac
timeframe_dirs = [d for d in os.listdir(path) if d != ".DS_Store"]
for timeframe in timeframe_dirs:
os.chdir(f"{path}/{timeframe}")
assets_files = glob.glob("*.csv")
symbol_list = [s[:-4] for s in assets_files]
# Loop through all the symbols and add them to the price table
for symbol in symbol_list:
df = pd.read_csv(f"{path}/{timeframe}/{symbol}.csv")[
["time", "open", "high", "low", "close", "real_volume"]
]
df.rename(
columns={"time": "datetime", "real_volume": "volume"}, inplace=True
)
# Look up the asset_id for this symbol in PostgreSQL (inserting the asset first if it does not exist yet)
asset_id = pd.read_sql(
f"SELECT id FROM asset WHERE symbol = '{symbol}';", engine
)
if len(asset_id) == 0:
query = f"INSERT INTO asset (symbol) VALUES ('{symbol}');"
engine.execute(query)
asset_id = pd.read_sql(
f"SELECT id FROM asset WHERE symbol = '{symbol}';", engine
)
# Creating the columns
df["asset_id"] = int(asset_id["id"])
df["timeframe_id"] = f"{timeframe}"
# Writing the SQL query to populate price table
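# Note: row values are interpolated directly into the SQL string below (this assumes the
# CSV data is trusted); the ON CONFLICT clause turns the insert into an upsert keyed on
# (datetime, asset_id, timeframe_id).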
insert_init = """
INSERT INTO price (datetime, open, high, low, close, volume, asset_id, timeframe_id)
VALUES
"""
values = ",".join(
[
"('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')".format(
row["datetime"],
row["open"],
row["high"],
row["low"],
row["close"],
row["volume"],
row["asset_id"],
row["timeframe_id"]
)
for datetime, row in df.iterrows()
]
)
insert_end = """
ON CONFLICT (datetime, asset_id, timeframe_id) DO UPDATE
SET
open = EXCLUDED.open,
high = EXCLUDED.high,
low = EXCLUDED.low,
close = EXCLUDED.close,
volume = EXCLUDED.volume;
"""
query = insert_init + values + insert_end
engine.execute(query)
print("Script Successfully Executed!")
|
the-stack_0_26872
|
import sqlite3
def create_connection(db_file):
try:
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
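# Note: sqlite3.version is the version of the Python DB-API module,
# not of the SQLite library itself (that would be sqlite3.sqlite_version)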
print(sqlite3.version)
return conn
except sqlite3.Error as e:
print(e)
return e
def display_weather_table(c):
query = """
SELECT * FROM Weather;
"""
c.execute(query)
rows = c.fetchall()
for i, row in enumerate(rows):
print(i, row)
#CREATING THE TABLE "WEATHER"
if __name__=="__main__":
conn = create_connection("database.db")
c = conn.cursor()
create_weather_table = """
CREATE TABLE IF NOT EXISTS Weather(
_id TEXT PRIMARY KEY,
_index TEXT,
_type TEXT,
_score INTEGER,
_location TEXT,
timestamp TEXT,
humidity FLOAT,
temperature FLOAT,
wind_desc TEXT,
wind_direc TEXT,
feels_like FLOAT
);
"""
c.execute(create_weather_table)
c.close()
conn.close()
|
the-stack_0_26873
|
"""
sqlite_releases
~~~~~~~~~~~~~~~
Create a feed out of the SQLite release history pages at:
* https://www.sqlite.org/changes.html
* https://www.sqlite.org/chronology.html
Also serves as an example of how to write custom parsers.
This plugin needs additional dependencies, use the ``unstable-plugins`` extra
to install them:
.. code-block:: bash
pip install reader[unstable-plugins]
To load::
READER_PLUGIN='reader._plugins.sqlite_releases:init' \\
python -m reader serve
"""
import warnings
from datetime import datetime
from urllib.parse import urlparse
from urllib.parse import urlunparse
import bs4
from reader._parser import wrap_exceptions
from reader._types import EntryData
from reader._types import FeedData
warnings.filterwarnings(
'ignore',
message='No parser was explicitly specified',
module='reader._plugins.sqlite_releases',
)
FULL_URL = 'https://www.sqlite.org/changes.html'
URLS = [FULL_URL, 'https://www.sqlite.org/chronology.html']
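# Each h3 heading on the page starts one release section: the nearest preceding
# <a name=...> anchor (if any) supplies the URL fragment, and the sibling tags up
# to the next h3 form the section body.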
def extract_text(soup):
for h3 in soup.select('body h3'):
a_name = None
for element, _ in zip(h3.previous_siblings, range(3)):
if element.name == 'h3':
break
if element.name == 'a' and 'name' in element.attrs:
a_name = element
break
content = []
last_a_name_index = None
for i, element in enumerate(h3.next_siblings):
if element.name == 'h3':
break
if element.name == 'a' and 'name' in element.attrs:
last_a_name_index = i
content.append(element)
if last_a_name_index and len(content) - last_a_name_index <= 3:
content = content[:last_a_name_index]
yield h3.text, a_name['name'] if a_name else None, ''.join(map(str, content))
def make_entries(feed_url, url, soup):
for title, fragment, content in extract_text(soup):
try:
updated = datetime.strptime(title.split()[0], '%Y-%m-%d')
except (ValueError, IndexError):
continue
link = urlunparse(urlparse(url)._replace(fragment=fragment))
yield EntryData(
feed_url=feed_url,
id=title,
updated=updated,
title=title,
link=link,
summary=content,
)
def make_feed(feed_url, url, soup):
return FeedData(url=feed_url, title=soup.title and soup.title.text, link=url)
def parse(url, file, headers):
with wrap_exceptions(url, "while reading feed"):
soup = bs4.BeautifulSoup(file)
with wrap_exceptions(url, "while parsing page"):
feed = make_feed(url, FULL_URL, soup)
entries = list(make_entries(url, FULL_URL, soup))
feed = feed._replace(updated=max(e.updated for e in entries))
return feed, entries
def init(reader):
for url in URLS:
reader._parser.mount_parser_by_url(url, parse)
|
the-stack_0_26874
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: prismacloud_alibaba_cloud_account
short_description: Manage an Alibaba cloud account onboarded to Prisma Cloud.
description:
- Manage an Alibaba cloud account onboarded to Prisma Cloud.
author:
- Garfield Lee Freeman (@shinmog)
version_added: "2.9"
extends_documentation_fragment:
- paloaltonetworks.prismacloud.fragments.state
options:
accountId:
description:
- Alibaba account ID.
- Either the accountId or the name must be specified.
groupIds:
description:
- List of account group IDs to which you are assigning this account.
type: list
name:
description:
- Name to be used for the account on the Prisma Cloud platform.
- Must be unique.
- Either the accountId or the name must be specified.
ramArn:
description:
- Unique identifier for an Alibaba RAM role resource.
enabled:
description:
- Whether or not the account is enabled.
type: bool
default: false
'''
EXAMPLES = '''
- name: add alibaba account
prismacloud_alibaba_cloud_account:
name: 'foo'
ramArn: 'myRamArn'
enabled: true
'''
RETURN = '''
changed:
description: if a change was necessary
returned: success
type: bool
before:
description: the config before this module is invoked
returned: success
type: complex
after:
description: the config after this module is invoked
returned: success
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.prismacloud.plugins.module_utils import errors
from ansible_collections.paloaltonetworks.prismacloud.plugins.module_utils import prismacloud as pc
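# Resolve the Prisma Cloud account id of an onboarded Alibaba account by its name.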
def identify(client, name):
listing = client.get(['cloud', 'name'], {'cloudType': 'alibaba_cloud'})
for x in listing:
if x['name'] == name:
return x['id']
def main():
module = AnsibleModule(
argument_spec=dict(
accountId=dict(),
enabled=dict(type='bool', default=False),
groupIds=dict(type='list'),
name=dict(),
ramArn=dict(),
state=pc.state_spec(),
),
required_one_of=[
['accountId', 'name'],
],
supports_check_mode=True,
)
client = pc.PrismaCloudRequest(module)
# Variables.
obj = None
results = {'changed': False}
# Retrieve obj details.
if module.params['accountId'] is not None:
try:
obj = client.get(['cloud', 'alibaba_cloud', module.params['accountId']])
except errors.ObjectNotFoundError:
pass
else:
the_id = identify(client, module.params['name'])
if the_id is not None:
obj = client.get(['cloud', 'alibaba_cloud', the_id])
results['before'] = obj
if module.params['state'] == 'present':
fields = ['accountId', 'enabled', 'groupIds', 'name', 'ramArn']
req_obj = {
'accountId': '',
'groupIds': [],
'name': '',
'enabled': False,
'ramArn': '',
}
for field in fields:
if module.params[field] is not None:
req_obj[field] = module.params[field]
if obj is None:
results['changed'] = True
if not module.check_mode:
client.post(['cloud', 'alibaba_cloud'], req_obj)
req_obj['accountId'] = identify(client, req_obj['name'])
else:
if not req_obj['accountId']:
req_obj['accountId'] = obj['accountId']
for field in fields:
if obj.get(field) != req_obj.get(field):
results['changed'] = True
if not module.check_mode:
client.put(['cloud', 'alibaba_cloud', req_obj['accountId']], req_obj)
break
results['after'] = req_obj
elif module.params['state'] == 'absent':
results['after'] = None
if obj is not None:
results['changed'] = True
if not module.check_mode:
client.delete(['cloud', 'alibaba_cloud', obj['accountId']])
# Done.
module.exit_json(**results)
if __name__ == '__main__':
main()
|
the-stack_0_26875
|
import io
import os
import re
import sys
from setuptools import setup, find_packages
PATH_BASE = os.path.dirname(__file__)
PACKAGE_DIR = PACKAGE_NAME = 'djsommo'
def read_file(fpath):
"""Reads a file within package directories."""
with io.open(os.path.join(PATH_BASE, fpath)) as f:
return f.read()
def get_version():
"""Returns version number, without module import (which can lead to ImportError
if some dependencies are unavailable before install."""
contents = read_file(os.path.join(PACKAGE_DIR, '__init__.py'))
version = re.search(r'VERSION = \(([^)]+)\)', contents)
version = version.group(1).replace(', ', '.').strip()
return version
setup(
name=PACKAGE_NAME,
version=get_version(),
url='http://github.com/jayvdb/' + PACKAGE_NAME,
description='Reusable Django app to safely use non-core optional model Meta options',
long_description=read_file('README.md'),
long_description_content_type="text/markdown",
license='MIT',
author='John Mark Vandenberg',
author_email='[email protected]',
packages=find_packages(),
zip_safe=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|
the-stack_0_26876
|
import math
from abc import abstractmethod
import pycode_similar
from diff_match_patch import diff_match_patch
class Comparator:
@abstractmethod
def compare(self, a, b):
pass
class PycodeComparison(Comparator):
def compare(self, a, b):
res = pycode_similar.detect([a, b], diff_method=pycode_similar.UnifiedDiff)
val = res[0][1][0].plagiarism_percent
return val
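# Line-based similarity built on diff_match_patch: optionally ignores lines found in a
# reference "base" set, then scores 1 - (non-equal diff chunks / total input lines).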
class DiffComparator(Comparator):
def __init__(self, base=None) -> None:
super().__init__()
self.H = None
if base is not None:
self.H = set()
for e in base:
self.H.add(e)
def filter(self, s):
return [e for e in s if e not in self.H]
def compare(self, a, b):
if self.H is not None:
a, b = self.filter(a), self.filter(b)
dmp = diff_match_patch()
dmp.Diff_Timeout = math.inf
ret = dmp.diff_linesToChars("\n".join(a), "\n".join(b))
diff = dmp.diff_main(*ret)
dmp.diff_charsToLines(diff, ret[2])
#dmp.diff_cleanupSemantic(diff)
perc = 1 - len([e for e in diff if e[0] != 0]) / (len(a) + len(b))
#html = dmp.diff_prettyHtml(diff)
#with open("test.html", "w") as f:
# f.write(html)
return perc
|
the-stack_0_26878
|
import pandas as pd
dataset = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/pythonTutorial/online_raw.csv')
dataset.fillna(dataset.mean(), inplace = True)
from sklearn.preprocessing import MinMaxScaler
#Define MinMaxScaler as scaler
scaler = MinMaxScaler()
#List all the features that need to be scaled
scaling_column = ['Administrative', 'Administrative_Duration', 'Informational', 'Informational_Duration', 'ProductRelated', 'ProductRelated_Duration', 'BounceRates', 'ExitRates', 'PageValues']
#Apply fit_transform to scale the selected features
dataset[scaling_column] = scaler.fit_transform(dataset[scaling_column])
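#After min-max scaling, every column in scaling_column should have min 0 and max 1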
#Checking min and max value of the scaling_column
print(dataset[scaling_column].describe().T[['min','max']])
|
the-stack_0_26879
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Mark Feltner'
SITENAME = "Mark Feltner's Website"
SITENAME_SHORT = "Mark Feltner's Website"
EMAIL = '[email protected]'
SITEURL = ''
DESCRIPTION = "The thoughts and writings of Mark James Feltner."
THEME = 'theme/feltnerm'
DEVELOP = True
PATH = 'content'
TIMEZONE = 'America/Chicago'
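# Disable the built-in HTML reader so raw .html files are not processed as content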
READERS = {
'html': None
}
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
STATIC_PATHS = [
'static/CNAME',
'static/robots.txt',
'static/hackers.txt',
'static/humans.txt'
]
EXTRA_PATH_METADATA = {
'static/CNAME': { 'path': 'CNAME' },
'static/robots.txt': { 'path': 'robots.txt' },
'static/hackers.txt': { 'path': 'hackers.txt' },
'static/humans.txt': { 'path': 'humans.txt' }
}
REPO_HOME = 'https://github.com/feltnerm/blog'
TWITTER_USERNAME = 'feltnermj'
GITHUB_USERNAME = 'feltnerm'
LASTFM_USERNAME = 'plugitin'
FACEBOOK_USERNAME = 'feltnerm'
ANALYTICS = {
'GOOGLE': ''
}
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
DIRECT_TEMPLATES = ['index', 'archives', 'tags']
ARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}.html'
INDEX_SAVE_AS = 'blog/index.html'
ARCHIVES_URL = 'blog/archive'
ARCHIVES_SAVE_AS = 'blog/archive/index.html'
YEAR_ARCHIVE_URL = 'blog/{date:%Y}/'
YEAR_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/index.html'
MONTH_ARCHIVE_URL = 'blog/{date:%Y}/{date:%m}'
MONTH_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/index.html'
DAY_ARCHIVE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/'
DAY_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/index.html'
TAG_URL = 'blog/tag/{slug}/'
TAG_SAVE_AS = 'blog/tag/{slug}/index.html'
TAGS_URL = 'blog/tags/'
TAGS_SAVE_AS = 'blog/tags/index.html'
# don't generate these:
AUTHOR_SAVE_AS = ''
CATEGORY_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
MARKDOWN = {
'extension_configs': {
# 'markdown.extensions.codehilite': {'css_class': 'highlight'},
'markdown.extensions.smarty': {},
'markdown.extensions.sane_lists': {},
# "proxy" for `markdown.extensions.extra``
'pymdownx.extra': {},
# emphasis that is more like GFM
'pymdownx.betterem': {},
# use emoji shortcodes
'pymdownx.emoji': {},
# code highlighting
'pymdownx.highlight': {},
'pymdownx.inlinehilite': {},
'pymdownx.superfences': {},
# turn markdown links into ... links
'pymdownx.magiclink': {},
# strict parsing of headers
'pymdownx.saneheaders': {},
# fancy symbols
'pymdownx.smartsymbols': {},
# @todo: where did this extension go?!
# 'markdown.extensions.headerid': {},
},
'output_format': 'html5',
}
|
the-stack_0_26880
|
#!/usr/bin/env python3
# Copyright (c) 2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import infincoincashTier2TestFramework
from test_framework.util import (
assert_equal,
assert_true,
Decimal,
)
import time
"""
Test checking:
1) Masternodes setup/creation.
2) Proposal creation.
3) Vote creation.
4) Proposal and vote broadcast.
5) Proposal and vote sync.
"""
class MasternodeGovernanceBasicTest(infincoincashTier2TestFramework):
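# Assert that every node sees exactly one finalized budget with the expected
# vote count and status.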
def check_budget_finalization_sync(self, votesCount, status):
for i in range(0, len(self.nodes)):
node = self.nodes[i]
budFin = node.mnfinalbudget("show")
assert_true(len(budFin) == 1, "MN budget finalization not synced in node" + str(i))
budget = budFin[next(iter(budFin))]
assert_equal(budget["VoteCount"], votesCount)
assert_equal(budget["Status"], status)
def broadcastbudgetfinalization(self, node, with_ping_mns=[]):
self.log.info("suggesting the budget finalization..")
assert (node.mnfinalbudgetsuggest() is not None)
self.log.info("confirming the budget finalization..")
time.sleep(1)
self.stake(4, with_ping_mns)
self.log.info("broadcasting the budget finalization..")
return node.mnfinalbudgetsuggest()
def check_proposal_existence(self, proposalName, proposalHash):
for node in self.nodes:
proposals = node.getbudgetinfo(proposalName)
assert(len(proposals) > 0)
assert_equal(proposals[0]["Hash"], proposalHash)
def check_vote_existence(self, proposalName, mnCollateralHash, voteType):
for i in range(0, len(self.nodes)):
node = self.nodes[i]
votesInfo = node.getbudgetvotes(proposalName)
assert(len(votesInfo) > 0)
found = False
for voteInfo in votesInfo:
if (voteInfo["mnId"].split("-")[0] == mnCollateralHash) :
assert_equal(voteInfo["Vote"], voteType)
found = True
assert_true(found, "Error checking vote existence in node " + str(i))
def get_proposal_obj(self, Name, URL, Hash, FeeHash, BlockStart, BlockEnd,
TotalPaymentCount, RemainingPaymentCount, PaymentAddress,
Ratio, Yeas, Nays, Abstains, TotalPayment, MonthlyPayment,
IsEstablished, IsValid, Allotted, TotalBudgetAllotted, IsInvalidReason = ""):
obj = {}
obj["Name"] = Name
obj["URL"] = URL
obj["Hash"] = Hash
obj["FeeHash"] = FeeHash
obj["BlockStart"] = BlockStart
obj["BlockEnd"] = BlockEnd
obj["TotalPaymentCount"] = TotalPaymentCount
obj["RemainingPaymentCount"] = RemainingPaymentCount
obj["PaymentAddress"] = PaymentAddress
obj["Ratio"] = Ratio
obj["Yeas"] = Yeas
obj["Nays"] = Nays
obj["Abstains"] = Abstains
obj["TotalPayment"] = TotalPayment
obj["MonthlyPayment"] = MonthlyPayment
obj["IsEstablished"] = IsEstablished
obj["IsValid"] = IsValid
if IsInvalidReason != "":
obj["IsInvalidReason"] = IsInvalidReason
obj["Allotted"] = Allotted
obj["TotalBudgetAllotted"] = TotalBudgetAllotted
return obj
def check_budgetprojection(self, expected):
for i in range(self.num_nodes):
assert_equal(self.nodes[i].getbudgetprojection(), expected)
self.log.info("Budget projection valid for node %d" % i)
def run_test(self):
self.enable_mocktime()
self.setup_2_masternodes_network()
# Prepare the proposal
self.log.info("preparing budget proposal..")
firstProposalName = "super-cool"
firstProposalLink = "https://forum.infincoincash.org/t/test-proposal"
firstProposalCycles = 2
firstProposalAddress = self.miner.getnewaddress()
firstProposalAmountPerCycle = 300
nextSuperBlockHeight = self.miner.getnextsuperblock()
proposalFeeTxId = self.miner.preparebudget(
firstProposalName,
firstProposalLink,
firstProposalCycles,
nextSuperBlockHeight,
firstProposalAddress,
firstProposalAmountPerCycle)
# generate 3 blocks to confirm the tx (and update the mnping)
self.stake(3, [self.remoteOne, self.remoteTwo])
# activate sporks
self.activate_spork(self.minerPos, "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT")
self.activate_spork(self.minerPos, "SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT")
self.activate_spork(self.minerPos, "SPORK_13_ENABLE_SUPERBLOCKS")
txinfo = self.miner.gettransaction(proposalFeeTxId)
assert_equal(txinfo['amount'], -50.00)
self.log.info("submitting the budget proposal..")
proposalHash = self.miner.submitbudget(
firstProposalName,
firstProposalLink,
firstProposalCycles,
nextSuperBlockHeight,
firstProposalAddress,
firstProposalAmountPerCycle,
proposalFeeTxId)
# let's wait a little bit and see if all nodes are synced
time.sleep(1)
self.check_proposal_existence(firstProposalName, proposalHash)
self.log.info("proposal broadcast successful!")
# Proposal is established after 5 minutes. Mine 7 blocks
# Proposal needs to be on the chain > 5 min.
self.stake(7, [self.remoteOne, self.remoteTwo])
# now let's vote for the proposal with the first MN
self.log.info("broadcasting votes for the proposal now..")
voteResult = self.ownerOne.mnbudgetvote("alias", proposalHash, "yes", self.masternodeOneAlias)
assert_equal(voteResult["detail"][0]["result"], "success")
# check that the vote was accepted everywhere
self.stake(1, [self.remoteOne, self.remoteTwo])
self.check_vote_existence(firstProposalName, self.mnOneTxHash, "YES")
self.log.info("all good, MN1 vote accepted everywhere!")
# now let's vote for the proposal with the second MN
voteResult = self.ownerTwo.mnbudgetvote("alias", proposalHash, "yes", self.masternodeTwoAlias)
assert_equal(voteResult["detail"][0]["result"], "success")
# check that the vote was accepted everywhere
self.stake(1, [self.remoteOne, self.remoteTwo])
self.check_vote_existence(firstProposalName, self.mnTwoTxHash, "YES")
self.log.info("all good, MN2 vote accepted everywhere!")
# Now check the budget
blockStart = nextSuperBlockHeight
blockEnd = blockStart + firstProposalCycles * 145
TotalPayment = firstProposalAmountPerCycle * firstProposalCycles
Allotted = firstProposalAmountPerCycle
RemainingPaymentCount = firstProposalCycles
expected_budget = [
self.get_proposal_obj(firstProposalName, firstProposalLink, proposalHash, proposalFeeTxId, blockStart,
blockEnd, firstProposalCycles, RemainingPaymentCount, firstProposalAddress, 1,
2, 0, 0, Decimal(str(TotalPayment)), Decimal(str(firstProposalAmountPerCycle)),
True, True, Decimal(str(Allotted)), Decimal(str(Allotted)))
]
self.check_budgetprojection(expected_budget)
# Quick block count check.
assert_equal(self.ownerOne.getblockcount(), 276)
self.log.info("starting budget finalization sync test..")
self.stake(5, [self.remoteOne, self.remoteTwo])
# assert that there is no budget finalization first.
assert_true(len(self.ownerOne.mnfinalbudget("show")) == 0)
# suggest the budget finalization and confirm the tx (+4 blocks).
budgetFinHash = self.broadcastbudgetfinalization(self.miner,
with_ping_mns=[self.remoteOne, self.remoteTwo])
assert (budgetFinHash != "")
time.sleep(1)
self.log.info("checking budget finalization sync..")
self.check_budget_finalization_sync(0, "OK")
self.log.info("budget finalization synced!, now voting for the budget finalization..")
self.ownerOne.mnfinalbudget("vote-many", budgetFinHash)
self.ownerTwo.mnfinalbudget("vote-many", budgetFinHash)
self.stake(2, [self.remoteOne, self.remoteTwo])
self.log.info("checking finalization votes..")
self.check_budget_finalization_sync(2, "OK")
self.stake(8, [self.remoteOne, self.remoteTwo])
addrInfo = self.miner.listreceivedbyaddress(0, False, False, firstProposalAddress)
assert_equal(addrInfo[0]["amount"], firstProposalAmountPerCycle)
self.log.info("budget proposal paid!, all good")
# Check that the proposal info returns updated payment count
expected_budget[0]["RemainingPaymentCount"] -= 1
self.check_budgetprojection(expected_budget)
if __name__ == '__main__':
MasternodeGovernanceBasicTest().main()
|
the-stack_0_26881
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import time
from enum import IntEnum
from typing import Tuple
from PyQt5.QtCore import Qt, pyqtSignal, QThread
from PyQt5.QtWidgets import (QTreeWidget, QTreeWidgetItem, QMenu, QGridLayout, QComboBox,
QLineEdit, QDialog, QVBoxLayout, QHeaderView, QCheckBox,
QTabWidget, QWidget, QLabel)
from PyQt5.QtGui import QFontMetrics
from electrum_axe.i18n import _
from electrum_axe import constants, blockchain
from electrum_axe.interface import serialize_server, deserialize_server
from electrum_axe.network import Network
from electrum_axe.logging import get_logger
from .util import Buttons, CloseButton, HelpButton, read_QIcon, char_width_in_lineedit
_logger = get_logger(__name__)
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, network, config, network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Electrum Network'))
self.setMinimumSize(500, 300)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
network.register_callback(self.on_network, ['network_updated'])
def on_network(self, event, *args):
self.network_updated_signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
class NodesListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Connected node'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
is_server = not bool(item.data(0, Qt.UserRole))
menu = QMenu()
if is_server:
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
else:
chain_id = item.data(1, Qt.UserRole)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(chain_id))
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, network: Network):
self.clear()
self.addChild = self.addTopLevelItem
chains = network.get_blockchains()
n_chains = len(chains)
for chain_id, interfaces in chains.items():
b = blockchain.blockchains.get(chain_id)
if b is None: continue
name = b.get_name()
if n_chains > 1:
x = QTreeWidgetItem([name + '@%d'%b.get_max_forkpoint(), '%d'%b.height()])
x.setData(0, Qt.UserRole, 1)
x.setData(1, Qt.UserRole, b.get_id())
else:
x = self
for i in interfaces:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([i.host + star, '%d'%i.tip])
item.setData(0, Qt.UserRole, 0)
item.setData(1, Qt.UserRole, i.server)
x.addChild(item)
if n_chains > 1:
self.addTopLevelItem(x)
x.setExpanded(True)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
super().update()
class ServerListWidget(QTreeWidget):
class Columns(IntEnum):
HOST = 0
PORT = 1
SERVER_STR_ROLE = Qt.UserRole + 100
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Host'), _('Port')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
menu = QMenu()
server = item.data(self.Columns.HOST, self.SERVER_STR_ROLE)
menu.addAction(_("Use as server"), lambda: self.set_server(server))
menu.exec_(self.viewport().mapToGlobal(position))
def set_server(self, s):
host, port, protocol = deserialize_server(s)
self.parent.server_host.setText(host)
self.parent.server_port.setText(port)
self.parent.set_server()
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, servers, protocol, use_tor):
self.clear()
for _host, d in sorted(servers.items()):
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
x = QTreeWidgetItem([_host, port])
server = serialize_server(_host, port, protocol)
x.setData(self.Columns.HOST, self.SERVER_STR_ROLE, server)
self.addTopLevelItem(x)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(self.Columns.HOST, QHeaderView.Stretch)
h.setSectionResizeMode(self.Columns.PORT, QHeaderView.ResizeToContents)
super().update()
class NetworkChoiceLayout(object):
def __init__(self, network: Network, config, wizard=False):
self.network = network
self.config = config
self.protocol = None
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
server_tab = QWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(server_tab, _('Server'))
tabs.addTab(proxy_tab, _('Proxy'))
fixed_width_hostname = 24 * char_width_in_lineedit()
fixed_width_port = 6 * char_width_in_lineedit()
# server tab
grid = QGridLayout(server_tab)
grid.setSpacing(8)
self.server_host = QLineEdit()
self.server_host.setFixedWidth(fixed_width_hostname)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(fixed_width_port)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.server_host.editingFinished.connect(self.set_server)
self.server_port.editingFinished.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Axe Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Axe Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 0, 0, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_host, 1, 1, 1, 2)
grid.addWidget(self.server_port, 1, 3)
label = _('Server peers') if network.is_connected() else _('Default Servers')
grid.addWidget(QLabel(label), 2, 0, 1, 5)
self.servers_list = ServerListWidget(self)
grid.addWidget(self.servers_list, 3, 0, 1, 5)
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_cb.setEnabled(self.config.is_modifiable('proxy'))
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(fixed_width_hostname)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(fixed_width_port)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = QLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setEchoMode(QLineEdit.Password)
self.proxy_password.setFixedWidth(fixed_width_port)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(read_QIcon("tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
self.tor_auto_on_cb = QCheckBox(self.network.TOR_AUTO_ON_MSG)
self.tor_auto_on_cb.setIcon(read_QIcon("tor_logo.png"))
self.tor_auto_on_cb.setChecked(self.config.get('tor_auto_on', True))
self.tor_auto_on_cb.clicked.connect(self.use_tor_auto_on)
self.fiat_bypass_tor_cb = QCheckBox(self.network.FIAT_BYPASS_TOR_MSG)
fiat_bypass_tor = self.config.get('fiat_bypass_tor', False)
self.fiat_bypass_tor_cb.setChecked(fiat_bypass_tor)
self.fiat_bypass_tor_cb.clicked.connect(self.fiat_bypass_tor)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Axe Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.addWidget(self.tor_auto_on_cb, 6, 0, 1, 3)
grid.addWidget(HelpButton(_('During wallet startup try to detect and use Tor Proxy.')), 6, 4)
grid.addWidget(self.fiat_bypass_tor_cb, 7, 0, 1, 3)
grid.setRowStretch(8, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Axe Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.server_label = QLabel('')
msg = _("Axe Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_label, 1, 1, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 2, 0)
grid.addWidget(self.height_label, 2, 1)
grid.addWidget(HelpButton(msg), 2, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 3, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 5, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_host, self.server_port, self.servers_list]:
w.setEnabled(False)
def update(self):
net_params = self.network.get_parameters()
host, port, protocol = net_params.host, net_params.port, net_params.protocol
proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
if not self.server_host.hasFocus() and not self.server_port.hasFocus():
self.server_host.setText(host)
self.server_port.setText(str(port))
self.autoconnect_cb.setChecked(auto_connect)
interface = self.network.interface
host = interface.host if interface else _('None')
self.server_label.setText(host)
self.set_protocol(protocol)
self.servers = self.network.get_servers()
self.servers_list.update(self.servers, self.protocol, self.tor_cb.isChecked())
self.enable_set_server()
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains) > 1:
chain = self.network.blockchain()
forkpoint = chain.get_max_forkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(forkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(self.network)
def fill_in_proxy_settings(self):
proxy_config = self.network.get_parameters().proxy
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = self.server_host.text()
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if p not in pp.keys():
p = list(pp.keys())[0]
port = pp[p]
self.server_host.setText(host)
self.server_port.setText(port)
self.set_protocol(p)
self.set_server()
def follow_branch(self, chain_id):
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
self.update()
def follow_server(self, server):
self.network.run_from_another_thread(self.network.follow_chain_given_server(server))
self.update()
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
port = pp.get(protocol)
if port is None:
protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = list(pp.keys())[0]
port = pp.get(protocol)
self.server_host.setText(host)
self.server_port.setText(port)
def accept(self):
pass
def set_server(self):
net_params = self.network.get_parameters()
net_params = net_params._replace(host=str(self.server_host.text()),
port=str(self.server_port.text()),
auto_connect=self.autoconnect_cb.isChecked())
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_proxy(self):
net_params = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
net_params = net_params._replace(proxy=proxy)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def suggest_proxy(self, found_proxy):
if found_proxy is None:
self.tor_cb.hide()
return
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if (self.proxy_cb.isChecked()
and self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5')
and self.proxy_host.text() == "127.0.0.1"
and self.proxy_port.text() == str(found_proxy[1])):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
_logger.info("can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def fiat_bypass_tor(self, bypass):
self.config.set_key('fiat_bypass_tor', bypass, False)
coro = self.network.restart()
self.network.run_from_another_thread(coro)
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
def use_tor_auto_on(self, use_it):
self.config.set_key('tor_auto_on', use_it, True)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
def run(self):
# Probable ports for Tor to listen at
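# (9050 is the default SOCKS port of the tor daemon, 9150 of the Tor Browser bundle)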
ports = [9050, 9150]
while True:
for p in ports:
net_addr = ("127.0.0.1", p)
if TorDetector.is_tor_port(net_addr):
try:
self.found_proxy.emit(net_addr)
except AttributeError as e:
_logger.info('found_proxy signal is already unbound')
return
break
else:
try:
self.found_proxy.emit(None)
except AttributeError:
_logger.info('found_proxy signal is already unbound')
return
time.sleep(10)
@staticmethod
def is_tor_port(net_addr: Tuple[str, int]) -> bool:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(net_addr)
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
|
the-stack_0_26882
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("//antlir/bzl:shape.bzl", "shape")
load("//antlir/bzl:target_tagger.shape.bzl", "target_tagged_image_source_t")
tarball_t = shape.shape(
force_root_ownership = shape.field(bool, optional = True),
into_dir = shape.path,
source = target_tagged_image_source_t,
)
|
the-stack_0_26884
|
import datetime
import logging
import threading
import time
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import CommandError
from djutils.management.commands.queue_consumer import Command as QueueConsumer
from djutils.queue.decorators import crontab, queue_command, periodic_command
from djutils.queue.queue import QueueCommand, PeriodicQueueCommand, QueueException, invoker
from djutils.queue.registry import registry
from djutils.test import TestCase
from djutils.utils.helpers import ObjectDict
class DummyThreadQueue():
"""A replacement for the stdlib Queue.Queue"""
def put(self, message):
command = registry.get_command_for_message(message)
command.execute()
def join(self):
pass
class TestQueueConsumer(QueueConsumer):
"""Subclass of the consumer for test purposes"""
def get_logger(self, verbosity):
return logging.getLogger('djutils.tests.queue.logger')
def initialize_options(self, options):
super(TestQueueConsumer, self).initialize_options(options)
self._queue = DummyThreadQueue()
class UserCommand(QueueCommand):
def execute(self):
user, old_email, new_email = self.data
user.email = new_email
user.save()
@queue_command
def user_command(user, data):
user.email = data
user.save()
class BampfException(Exception):
pass
@queue_command
def throw_error():
raise BampfException('bampf')
class TestPeriodicCommand(PeriodicQueueCommand):
def execute(self):
User.objects.create_user('thirty', 'thirty', 'thirty')
def validate_datetime(self, dt):
return crontab(minute='*/30')(dt)
@periodic_command(crontab(minute='*/15'))
def every_fifteen():
User.objects.create_user('fifteen', 'fifteen', 'fifteen')
class QueueTest(TestCase):
def setUp(self):
self.orig_always_eager = getattr(settings, 'QUEUE_ALWAYS_EAGER', False)
settings.QUEUE_ALWAYS_EAGER = False
self.dummy = User.objects.create_user('username', '[email protected]', 'password')
self.consumer_options = ObjectDict(
logfile='',
delay=.1,
backoff=2,
max_delay=.4,
no_periodic=False,
threads=2,
verbosity=1,
)
invoker.flush()
def tearDown(self):
settings.QUEUE_ALWAYS_EAGER = self.orig_always_eager
def test_basic_processing(self):
# make sure UserCommand got registered
self.assertTrue('djutils.tests.queue.UserCommand' in registry)
self.assertEqual(registry._registry['djutils.tests.queue.UserCommand'], UserCommand)
# create a command
command = UserCommand((self.dummy, self.dummy.email, '[email protected]'))
# enqueueing the command won't execute it - it just hangs out
invoker.enqueue(command)
# did the message get enqueued?
self.assertEqual(len(invoker.queue), 1)
# dequeueing loads from the queue, creates a command and executes it
invoker.dequeue()
# make sure the command's execute() method got called
dummy = User.objects.get(username='username')
self.assertEqual(dummy.email, '[email protected]')
def test_decorated_function(self):
user_command(self.dummy, '[email protected]')
self.assertEqual(len(invoker.queue), 1)
# the user's email address hasn't changed yet
dummy = User.objects.get(username='username')
self.assertEqual(dummy.email, '[email protected]')
# dequeue
invoker.dequeue()
# make sure that the command was executed
dummy = User.objects.get(username='username')
self.assertEqual(dummy.email, '[email protected]')
self.assertEqual(len(invoker.queue), 0)
def test_always_eager(self):
settings.QUEUE_ALWAYS_EAGER = True
user_command(self.dummy, '[email protected]')
self.assertEqual(len(invoker.queue), 0)
# the user's email address was changed
dummy = User.objects.get(username='username')
self.assertEqual(dummy.email, '[email protected]')
def test_error_raised(self):
throw_error()
self.assertRaises(BampfException, invoker.dequeue)
def test_crontab_month(self):
# validates the following months, 1, 4, 7, 8, 9
valids = [1, 4, 7, 8, 9]
validate_m = crontab(month='1,4,*/6,8-9')
for x in xrange(1, 13):
res = validate_m(datetime.datetime(2011, x, 1))
self.assertEqual(res, x in valids)
def test_crontab_day(self):
# validates the following days
valids = [1, 4, 7, 8, 9, 13, 19, 25, 31]
validate_d = crontab(day='*/6,1,4,8-9')
for x in xrange(1, 32):
res = validate_d(datetime.datetime(2011, 1, x))
self.assertEqual(res, x in valids)
def test_crontab_hour(self):
# validates the following hours
valids = [0, 1, 4, 6, 8, 9, 12, 18]
validate_h = crontab(hour='8-9,*/6,1,4')
for x in xrange(24):
res = validate_h(datetime.datetime(2011, 1, 1, x))
self.assertEqual(res, x in valids)
edge = crontab(hour=0)
self.assertTrue(edge(datetime.datetime(2011, 1, 1, 0, 0)))
self.assertFalse(edge(datetime.datetime(2011, 1, 1, 12, 0)))
def test_crontab_minute(self):
# validates the following minutes
valids = [0, 1, 4, 6, 8, 9, 12, 18, 24, 30, 36, 42, 48, 54]
validate_m = crontab(minute='4,8-9,*/6,1')
for x in xrange(60):
res = validate_m(datetime.datetime(2011, 1, 1, 1, x))
self.assertEqual(res, x in valids)
def test_crontab_day_of_week(self):
# validates the following days of week
# jan, 1, 2011 is a saturday
valids = [2, 4, 9, 11, 16, 18, 23, 25, 30]
validate_dow = crontab(day_of_week='0,2')
for x in xrange(1, 32):
res = validate_dow(datetime.datetime(2011, 1, x))
self.assertEqual(res, x in valids)
def test_crontab_all_together(self):
# jan 1, 2011 is a saturday
# may 1, 2011 is a sunday
validate = crontab(
month='1,5',
day='1,4,7',
day_of_week='0,6',
hour='*/4',
minute='1-5,10-15,50'
)
self.assertTrue(validate(datetime.datetime(2011, 5, 1, 4, 11)))
self.assertTrue(validate(datetime.datetime(2011, 5, 7, 20, 50)))
self.assertTrue(validate(datetime.datetime(2011, 1, 1, 0, 1)))
# fails validation on month
self.assertFalse(validate(datetime.datetime(2011, 6, 4, 4, 11)))
# fails validation on day
self.assertFalse(validate(datetime.datetime(2011, 1, 6, 4, 11)))
# fails validation on day_of_week
self.assertFalse(validate(datetime.datetime(2011, 1, 4, 4, 11)))
# fails validation on hour
self.assertFalse(validate(datetime.datetime(2011, 1, 1, 1, 11)))
# fails validation on minute
self.assertFalse(validate(datetime.datetime(2011, 1, 1, 4, 6)))
def test_registry_get_periodic_commands(self):
# three, one for the base class, one for the TestPeriodicCommand, and
# one for the decorated function
self.assertEqual(len(registry.get_periodic_commands()), 3)
def test_periodic_command_registration(self):
# make sure TestPeriodicCommand got registered
self.assertTrue('djutils.tests.queue.TestPeriodicCommand' in registry)
self.assertEqual(registry._registry['djutils.tests.queue.TestPeriodicCommand'], TestPeriodicCommand)
# create a command
command = TestPeriodicCommand()
# enqueueing the command won't execute it - it just hangs out
invoker.enqueue(command)
# check that there are no users in the db
self.assertEqual(User.objects.all().count(), 1)
# did the message get enqueued?
self.assertEqual(len(invoker.queue), 1)
# dequeueing loads from the queue, creates a command and executes it
invoker.dequeue()
# a new user should have been added
self.assertEqual(User.objects.all().count(), 2)
def test_periodic_command_enqueueing(self):
on_time = datetime.datetime(2011, 1, 1, 1, 15) # matches */15
off_time = datetime.datetime(2011, 1, 1, 1, 16) # doesn't match */15
both_time = datetime.datetime(2011, 1, 1, 1, 30)
# there should be nothing in the queue
self.assertEqual(len(invoker.queue), 0)
# no commands should be enqueued
invoker.enqueue_periodic_commands(off_time)
self.assertEqual(len(invoker.queue), 0)
# running it at 1:15 will pick up the */15 command
invoker.enqueue_periodic_commands(on_time)
self.assertEqual(len(invoker.queue), 1)
# dequeue and execute, should get a new user named 'fifteen'
invoker.dequeue()
# verify user created, then delete the user
self.assertEqual(User.objects.filter(username='fifteen').count(), 1)
User.objects.all().delete()
# make sure the queue is empty
self.assertEqual(len(invoker.queue), 0)
# running it at :30 will pick up both the */15 and the */30 commands
invoker.enqueue_periodic_commands(both_time)
self.assertEqual(len(invoker.queue), 2)
# execute both commands
invoker.dequeue()
invoker.dequeue()
# check that the users were created
self.assertEqual(User.objects.all().count(), 2)
self.assertEqual(User.objects.filter(username='fifteen').count(), 1)
self.assertEqual(User.objects.filter(username='thirty').count(), 1)
def test_daemon_initialization(self):
consumer = TestQueueConsumer()
db_name = 'testqueue'
consumer.initialize_options(self.consumer_options)
self.assertEqual(consumer.logfile, '/var/log/djutils-%s.log' % db_name)
self.assertEqual(consumer.delay, 0.1)
self.assertEqual(consumer.max_delay, 0.4)
self.assertEqual(consumer.backoff_factor, 2)
self.assertEqual(consumer.periodic_commands, True)
self.assertEqual(consumer.threads, 2)
self.consumer_options['logfile'] = '/var/log/custom.log'
consumer.initialize_options(self.consumer_options)
self.consumer_options['backoff'] = 0.5
self.assertRaises(CommandError, consumer.initialize_options, self.consumer_options)
self.consumer_options['backoff'] = 2
self.consumer_options['threads'] = 0
self.assertRaises(CommandError, consumer.initialize_options, self.consumer_options)
def test_consumer_delay(self):
consumer = TestQueueConsumer()
consumer.initialize_options(self.consumer_options)
# processing when there is no message will sleep
start = time.time()
consumer.process_message()
end = time.time()
# make sure it slept the initial amount
self.assertTrue(.09 < end - start < .11)
# try processing another message -- will delay longer
start = time.time()
consumer.process_message()
end = time.time()
self.assertTrue(.19 < end - start < .21)
# cause a command to be enqueued
user_command(self.dummy, '[email protected]')
dummy = User.objects.get(username='username')
self.assertEqual(dummy.email, '[email protected]')
# processing the message will reset the delay to initial state
consumer.process_message()
# make sure the command was executed
dummy = User.objects.get(username='username')
self.assertEqual(dummy.email, '[email protected]')
# make sure the delay was reset
self.assertEqual(consumer.delay, .1)
def test_daemon_multithreading(self):
pass
def test_daemon_periodic_commands(self):
pass
def test_daemon_worker_exception(self):
pass
def test_daemon_periodic_thread_exception(self):
pass
|
the-stack_0_26885
|
import argparse
import os
import pickle
import sys
from datetime import datetime
import numpy as np
import torch
import iic.archs as archs
from iic.utils.cluster.cluster_eval import \
_get_assignment_data_matches
from iic.utils.cluster.transforms import sobel_process
from iic.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
from iic.utils.segmentation.render import render
from iic.utils.segmentation.segmentation_eval import \
_segmentation_get_data, segmentation_eval
# Render images for segmentation models
parser = argparse.ArgumentParser()
parser.add_argument("--model_inds", type=int, nargs="+", default=[])
parser.add_argument("--net_name", type=str, default="best")
parser.add_argument("--imgs_dataloaders", type=str, nargs="+", default=["test"])
parser.add_argument("--num", type=int, default=100)
parser.add_argument("--reassess_acc", default=False, action="store_true")
parser.add_argument("--get_match_only", default=False, action="store_true")
args = parser.parse_args()
model_inds = args.model_inds
net_name_prefix = args.net_name
num = args.num
reassess_acc = args.reassess_acc
print("imgs_dataloaders passed:")
print(args.imgs_dataloaders)
out_root = "/scratch/shared/slow/xuji/iid_private"
for model_ind in model_inds:
out_dir = os.path.join(out_root, str(model_ind))
net_names = [net_name_prefix + "_net.pytorch"]
reloaded_config_path = os.path.join(out_dir, "config.pickle")
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
assert (config.model_ind == model_ind)
if not hasattr(config, "use_doersch_datasets"):
config.use_doersch_datasets = False
if "Coco" in config.dataset:
dataloaders_train, mapping_assignment_dataloader, mapping_test_dataloader \
= make_Coco_dataloaders(config)
all_label_names = [
"sky-stuff",
"plant-stuff",
"ground-stuff",
]
if config.include_things_labels:
all_label_names += ["person-things"]
if config.incl_animal_things:
all_label_names += ["animal-things"]
elif config.dataset == "Potsdam":
dataloaders_train, mapping_assignment_dataloader, mapping_test_dataloader \
= make_Potsdam_dataloaders(config)
if config.use_coarse_labels:
all_label_names = ["roads and cars",
"buildings and clutter",
"vegetation and trees"]
else:
all_label_names = ["roads",
"buildings",
"vegetation",
"trees",
"cars",
"clutter"]
assert (len(all_label_names) == config.gt_k)
print("dataloader sizes: %d %d %d" % (len(dataloaders_train[0]),
len(mapping_assignment_dataloader),
len(mapping_test_dataloader)))
# ------------------------------
for imgs_dataloader_name in args.imgs_dataloaders:
for net_name in net_names:
print("%s %s %s" % (
config.out_dir, imgs_dataloader_name, net_name.split(".")[0]))
net_name_outdir = os.path.join(config.out_dir,
imgs_dataloader_name,
net_name.split(".")[0])
if not os.path.exists(net_name_outdir):
os.makedirs(net_name_outdir)
print("doing net_name %s to %s" % (net_name, net_name_outdir))
sys.stdout.flush()
# load model
net = archs.__dict__[config.arch](config)
model_path = os.path.join(config.out_dir, net_name)
print("getting model path %s " % model_path)
net.load_state_dict(
torch.load(model_path, map_location=lambda storage, loc: storage))
net.cuda()
net = torch.nn.DataParallel(net)
net.module.eval()
if reassess_acc:
print("... reassessing acc %s" % datetime.now())
sys.stdout.flush()
stats_dict = segmentation_eval(config, net,
mapping_assignment_dataloader,
mapping_test_dataloader,
sobel=(not config.no_sobel),
return_only=True,
verbose=0)
acc = stats_dict["best"]
print("... reassessment finished, got acc %f" % acc)
sys.stdout.flush()
continue
print(
"starting to run test data through for rendering %s" % datetime.now())
all_matches, all_accs = _get_assignment_data_matches(net,
mapping_assignment_dataloader,
config, sobel=(not config.no_sobel),
using_IR=config.using_IR,
get_data_fn=_segmentation_get_data,
just_matches=False,
verbose=1)
head_i = np.argmax(all_accs)
match = all_matches[head_i]
print("got best head %d %s" % (head_i, datetime.now()))
print("best match %s" % str(match))
if args.get_match_only:
exit(0)
colour_map_raw = [(np.random.rand(3) * 255.).astype(np.uint8)
for _ in range(max(config.output_k, config.gt_k))]
# coco: green (veg) (7, 130, 42), blue (sky) (39, 159, 216),
# grey (road) (82, 91, 96), red (person - if used) (229, 57, 57)
if "Coco" in config.dataset:
colour_map_gt = [np.array([39, 159, 216], dtype=np.uint8),
np.array([7, 130, 42], dtype=np.uint8),
np.array([82, 91, 96], dtype=np.uint8),
np.array([229, 57, 57], dtype=np.uint8)
]
else:
colour_map_gt = colour_map_raw
# render first batch
predicted_all = [0 for _ in range(config.gt_k)]
correct_all = [0 for _ in range(config.gt_k)]
all_all = [0 for _ in range(config.gt_k)]
if imgs_dataloader_name == "test":
imgs_dataloader = mapping_test_dataloader
elif imgs_dataloader_name == "train":
imgs_dataloader = mapping_assignment_dataloader
else:
assert (False)
print("length of imgs_dataloader %d" % len(imgs_dataloader))
next_img_ind = 0
for b_i, batch in enumerate(imgs_dataloader):
orig_imgs, flat_targets, mask = batch
orig_imgs, flat_targets, mask = \
orig_imgs.cuda(), flat_targets.numpy(), mask.numpy().astype(np.bool)
if not config.no_sobel:
imgs = sobel_process(orig_imgs, config.include_rgb,
using_IR=config.using_IR)
else:
imgs = orig_imgs
with torch.no_grad():
x_outs_all = net(imgs)
x_outs = x_outs_all[head_i]
x_outs = x_outs.cpu().numpy()
flat_preds = np.argmax(x_outs, axis=1)
n, h, w = flat_preds.shape
num_imgs_curr = flat_preds.shape[0]
reordered_preds = np.zeros((num_imgs_curr, h, w),
dtype=flat_targets.dtype)
for pred_i, target_i in match:
reordered_preds[flat_preds == pred_i] = target_i
assert (mask.shape == reordered_preds.shape)
assert (flat_targets.shape == reordered_preds.shape)
masked = np.logical_not(mask)
reordered_preds[masked] = -1
flat_targets[masked] = -1 # not in colourmaps, hence will be black
assert (reordered_preds.max() < config.gt_k)
assert (flat_targets.max() < config.gt_k)
# print iou per class
for c in range(config.gt_k):
preds = (reordered_preds == c)
targets = (flat_targets == c)
predicted = preds.sum()
correct = (preds * targets).sum()
all = ((preds + targets) >= 1).sum()
predicted_all[c] += predicted
correct_all[c] += correct
all_all[c] += all
if next_img_ind >= num:
print("not rendering batch")
continue # already rendered num
elif next_img_ind + num_imgs_curr > num:
relevant_inds = range(0, num - next_img_ind)
else:
relevant_inds = range(0, num_imgs_curr)
orig_imgs = orig_imgs[relevant_inds, :, :, :]
imgs = imgs[relevant_inds, :, :, :]
flat_preds = flat_preds[relevant_inds, :, :]
reordered_preds = reordered_preds[relevant_inds, :, :]
flat_targets = flat_targets[relevant_inds, :, :]
if "Coco" in config.dataset:
# blue and red channels are swapped
orig_imgs_swapped = torch.zeros(orig_imgs.shape,
dtype=orig_imgs.dtype)
orig_imgs_swapped[:, 0, :, :] = orig_imgs[:, 2, :, :]
orig_imgs_swapped[:, 1, :, :] = orig_imgs[:, 1, :, :]
orig_imgs_swapped[:, 2, :, :] = orig_imgs[:, 0, :, :] # ignore others
render(orig_imgs_swapped, mode="image", name=("%d_img" % model_ind),
offset=next_img_ind,
out_dir=net_name_outdir)
render(imgs, mode="image_as_feat", name=("%d_img_feat" % model_ind),
offset=next_img_ind,
out_dir=net_name_outdir)
elif "Potsdam" in config.dataset:
render(orig_imgs, mode="image_ir", name=("%d_img" % model_ind),
offset=next_img_ind,
out_dir=net_name_outdir)
render(flat_preds, mode="preds", name=("%d_raw_preds" % model_ind),
offset=next_img_ind,
colour_map=colour_map_raw,
out_dir=net_name_outdir)
render(reordered_preds, mode="preds",
name=("%d_reordered_preds" % model_ind),
offset=next_img_ind,
colour_map=colour_map_gt,
out_dir=net_name_outdir)
render(flat_targets, mode="preds", name=("%d_targets" % model_ind),
offset=next_img_ind,
colour_map=colour_map_gt,
out_dir=net_name_outdir)
next_img_ind += num_imgs_curr
print("... rendered batch %d, next_img_ind %d " % (b_i, next_img_ind))
sys.stdout.flush()
for c in range(config.gt_k):
iou = correct_all[c] / float(all_all[c])
print("class %d: name %s: pred %d correct %d all %d %f iou" %
(c, all_label_names[c], predicted_all[c], correct_all[c],
all_all[c], iou))
|
the-stack_0_26886
|
import time
from contextlib import suppress
import pytest
from _pytest.fixtures import FixtureLookupError
from junit_report import JunitTestSuite
from test_infra.consts import OperatorStatus
from tests.base_test import BaseTest
from tests.config import ClusterConfig
from tests.conftest import get_available_openshift_versions, get_api_client
class TestInstall(BaseTest):
@pytest.fixture
def new_cluster_configuration(self, request):
# Overriding the default BaseTest.new_cluster_configuration fixture to set the openshift version.
config = ClusterConfig()
with suppress(FixtureLookupError):
# Resolving the param value.
version = request.getfixturevalue("openshift_version")
config.openshift_version = version
return config
@JunitTestSuite()
@pytest.mark.parametrize("openshift_version", get_available_openshift_versions())
def test_install(self, cluster, openshift_version):
cluster.prepare_for_installation()
cluster.start_install_and_wait_for_installed()
@JunitTestSuite()
@pytest.mark.parametrize("operators", sorted(get_api_client().get_supported_operators()))
def test_olm_operator(self, configs, get_nodes, get_cluster, operators, update_olm_config):
cluster_config, tf_config = configs
update_olm_config(tf_config=tf_config, cluster_config=cluster_config, operators=operators)
new_cluster = get_cluster(get_nodes(tf_config, cluster_config), cluster_config)
new_cluster.prepare_for_installation()
new_cluster.start_install_and_wait_for_installed()
assert new_cluster.is_operator_in_status(operators, OperatorStatus.AVAILABLE)
|
the-stack_0_26887
|
import os
import json
from enum import Enum
from attributes import AttributeSet
from entity import Entity
from entity_database import EntityDatabase
ItemType = Enum("ItemType", "WEAPON ARMOR HEALING")
base = os.path.dirname(__file__)
data_file = os.path.join(base, "..", "data", "items.json")
item_database = None
########################################################################
class Item(Entity):
####################################################################
def __init__(self):
super(Item, self).__init__()
self.type = ItemType.ARMOR
self.min = 0
self.max = 0
self.speed = 0
self.price = 0
self.attributes = AttributeSet()
####################################################################
@staticmethod
def deserialize_from_dict(item_data):
item = Item()
for field, value in item_data.items():
if field in ["id", "name", "min", "max", "speed", "price"]:
setattr(item, field, value)
elif field == "type":
item.type = getattr(ItemType, value)
else:
setattr(item.attributes, field, value)
return item
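# Illustrative sketch (not part of the original module): deserialize_from_dict
# expects one entry of items.json shaped roughly like the dict below; the
# concrete values are assumptions for demonstration only.
def _example_item_from_dict():
    sample = {"id": 1, "name": "rusty sword", "type": "WEAPON",
              "min": 2, "max": 5, "speed": 3, "price": 10}
    item = Item.deserialize_from_dict(sample)
    assert item.type == ItemType.WEAPON and item.max == 5
    return item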
########################################################################
class ItemDatabase(EntityDatabase):
####################################################################
def __init__(self):
super(ItemDatabase, self).__init__()
global item_database
item_database = self
####################################################################
@staticmethod
def load(force=False):
global item_database
if item_database is None or force:
item_database = ItemDatabase()
items_data = json.load(open(data_file))
for item_data in items_data:
item = Item.deserialize_from_dict(item_data)
item_database.by_id[item.id] = item
item_database.by_name[item.name.lower()] = item
return item_database
|
the-stack_0_26888
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Dashboard storage
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import datetime
# Third-party modules
import six
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import (
StringField,
DateTimeField,
ListField,
IntField,
BinaryField,
EmbeddedDocumentField,
)
# NOC modules
from noc.aaa.models.user import User
from noc.aaa.models.group import Group
from noc.core.mongo.fields import ForeignKeyField
DAL_NONE = -1
DAL_RO = 0
DAL_MODIFY = 1
DAL_ADMIN = 2
class DashboardAccess(EmbeddedDocument):
user = ForeignKeyField(User)
group = ForeignKeyField(Group)
level = IntField(choices=[(DAL_RO, "Read-only"), (DAL_MODIFY, "Modify"), (DAL_ADMIN, "Admin")])
@six.python_2_unicode_compatible
class Dashboard(Document):
meta = {
"collection": "noc.dashboards",
"strict": False,
"auto_create_index": False,
"indexes": ["owner", "tags"],
}
title = StringField()
# Username
owner = ForeignKeyField(User)
#
description = StringField()
#
tags = ListField(StringField())
# Config format version
format = IntField(default=1)
# gzip'ed data
config = BinaryField()
#
created = DateTimeField(default=datetime.datetime.now)
changed = DateTimeField(default=datetime.datetime.now)
#
access = ListField(EmbeddedDocumentField(DashboardAccess))
def __str__(self):
return self.title
def get_user_access(self, user):
# Direct match as owner
if user == self.owner or user.is_superuser:
return DAL_ADMIN
level = DAL_NONE
groups = user.groups.all()
for ar in self.access:
if ar.user and ar.user == user:
level = max(level, ar.level)
if ar.group and ar.group in groups:
level = max(level, ar.level)
if level == DAL_ADMIN:
return level
return level
def save(
self,
force_insert=False,
validate=True,
clean=True,
write_concern=None,
cascade=None,
cascade_kwargs=None,
_refs=None,
save_condition=None,
**kwargs
):
        # Split Dashboard Access into {User, level}, {Group, level}
# self.update(add_to_set__access=[parent_1, parent_2, parent_1])
if "access" in getattr(self, "_changed_fields", []):
# Check unique
processed = []
access = []
for da in sorted(self.access, reverse=True):
# Deduplicate rights
# @todo changing priority (reverse order)
if da.user and "u%d" % da.user.id in processed:
continue
elif da.group and "g%d" % da.group.id in processed:
continue
if da.user and da.group:
# Split User and Group rights
access += [
DashboardAccess(user=da.user.id, level=da.level),
DashboardAccess(group=da.group.id, level=da.level),
]
processed += ["u%d" % da.user.id, "g%d" % da.group.id]
continue
access += [da]
if da.user:
processed += ["u%d" % da.user.id]
if da.group:
processed += ["g%d" % da.group.id]
self.access = access
super(Dashboard, self).save(
force_insert=force_insert,
validate=validate,
clean=clean,
write_concern=write_concern,
cascade=cascade,
cascade_kwargs=cascade_kwargs,
_refs=_refs,
save_condition=save_condition,
**kwargs
)
def clean_access(self, item=None):
"""
Clean access rights
update2 = {"$push": {"access": {"$each": [{"user": i.user.id, "level": i.level} for i in items]}}}
:param item: All, user, group
:return:
"""
match = {"_id": self.id}
if item == "user":
update = {"$pull": {"access": {"user": {"$exists": True}}}}
elif item == "group":
update = {"$pull": {"access": {"group": {"$exists": True}}}}
else:
update = {"$pull": "access"}
self._get_collection().update(match, update)
|
the-stack_0_26889
|
"""A collection of functions to manipulate polynomials and their coefficients
Authors
-------
- Colin Cox
- Johannes Sahlmann (minor contributions and fixes)
References
----------
"""
from __future__ import absolute_import, print_function, division
import numpy as np
import pylab as pl
import scipy as sp
from scipy import linalg
def choose(n, r):
"""The number of ways of choosing r items from n"""
if n < 0 or r < 0:
print('Negative values not allowed')
return 0
if r > n:
print('r must not be greater than n')
return 0
combin = 1
if r > n / 2:
r1 = n - r
else:
r1 = r
for k in range(r1):
combin = combin * (n - k) // (k + 1)
return combin
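# Added illustrative check (not part of the original module): quick sanity
# tests for choose(), in the spirit of testpoly() further down.
def testchoose():
    """choose(n, r) should match the binomial coefficient."""
    assert choose(5, 2) == 10
    assert choose(6, 3) == 20
    # symmetry C(n, r) = C(n, n - r)
    assert choose(7, 2) == choose(7, 5)
    print('choose() checks passed')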
def dpdx(a, x, y, order=4):
"""Differential with respect to x
:param a:
:param x:
:param y:
:param order:
:return:
"""
dpdx = 0.0
k = 1 # index for coefficients
for i in range(1, order + 1):
for j in range(i + 1):
if i - j > 0:
dpdx = dpdx + (i - j) * a[k] * x ** (i - j - 1) * y ** j
k += 1
return dpdx
def dpdy(a, x, y, order=4):
"""Differential with respect to y
:param a:
:param x:
:param y:
:param order:
:return:
"""
dpdy = 0.0
k = 1 # index for coefficients
for i in range(1, order + 1):
for j in range(i + 1):
if j > 0:
dpdy = dpdy + j * a[k] * x ** (i - j) * y ** (j - 1)
k += 1
return dpdy
def flatten(A, order):
"""Convert triangular layout to linear array"""
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = A[i, j]
k += 1
return AF
def FlipX(A, order=4):
"""Change sign of all coefficients with odd x power"""
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = (-1)**(i-j)*A[k]
k += 1
return AF
def FlipXY(A, order=4):
"Change sign for coeffs where sum of x and y powers is odd"
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = (-1)**(i)*A[k]
k += 1
return AF
def FlipY(A, order = 4):
"""Change sign of all coefficients with odd y power"""
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = (-1)**(j)*A[k]
k += 1
return AF
def invert(a, b, u, v, n, verbose=False):
"""Given that order n polynomials of (x,y) have the result (u,v), find (x,y)
Newton Raphson method in two dimensions"""
tol = 1.0e-6
err = 1.0
# Initial guesses - Linear approximation
det = a[1] * b[2] - a[2] * b[1]
x0 = (b[2] * u - a[2] * v) / det
y0 = (-b[1] * u + a[1] * v) / det
if verbose:
print('Initial guesses', x0, y0)
x = x0
y = y0
X = sp.array([x, y])
iter = 0
while err > tol:
f1 = sp.array([poly(a, x, y, n) - u, poly(b, x, y, n) - v])
j = sp.array([[dpdx(a, x, y, n), dpdy(a, x, y, n)], [dpdx(b, x, y, n), dpdy(b, x, y, n)]])
invj = sp.linalg.inv(j)
X = X - sp.dot(invj, f1)
if verbose:
print('[X1,Y1]', X)
x1 = X[0]
y1 = X[1]
err = sp.hypot(x - x1, y - y1)
if verbose:
print('Error %10.2e' % err)
[x, y] = [x1, y1]
iter += 1
return x, y, err, iter
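# Added illustrative check (not part of the original module): forward-evaluate
# two arbitrary order-2 polynomials at a point, then recover the point with
# the Newton-Raphson inversion above. The coefficient values are made up.
def testinvert():
    a = [0.0, 1.0, 0.1, 0.01, -0.02, 0.03]
    b = [0.0, 0.2, 1.0, 0.02, 0.03, -0.02]
    x, y = 3.0, 2.0
    u = poly(a, x, y, 2)
    v = poly(b, x, y, 2)
    x1, y1, err, niter = invert(a, b, u, v, 2)
    print('recovered', x1, y1, 'in', niter, 'iterations, error %10.2e' % err)
    assert abs(x1 - x) < 1.0e-4 and abs(y1 - y) < 1.0e-4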
def jacob(a, b, x, y, order=4):
"""Calculation of Jacobean, or relative area"""
j = dpdx(a, x, y,order)*dpdy(b, x, y,order) - dpdx(b, x, y,order)*dpdy(a, x, y,order)
j = sp.fabs(j)
return j
def nircam_reorder(A, B, order):
"""Changes coefficient order from y**2 xy x**2 to x**2 xy y**2
:param A:
:param B:
:param order:
:return:
"""
terms = (order + 1) * (order + 2) // 2
A2 = np.zeros((terms))
B2 = np.zeros((terms))
for i in range(order + 1):
ti = i * (i + 1) // 2
for j in range(i + 1):
A2[ti + j] = A[ti + i - j]
B2[ti + j] = B[ti + i - j]
return (A2, B2)
def poly(a, x, y, order=4):
"""Return polynomial
:param a:
:param x:
:param y:
:param order:
:return:
"""
pol = 0.0
k = 0 # index for coefficients
for i in range(order+1):
for j in range(i+1):
pol = pol + a[k]*x**(i-j)*y**j
k+=1
return pol
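# Added illustrative example (not part of the original module): coefficients
# are ordered 1, x, y, x^2, xy, y^2, ... so a quadratic is easy to spot-check.
def testpolyvalue():
    a = [1, 2, 3, 4, 5, 6]
    # 1 + 2*2 + 3*1 + 4*2**2 + 5*2*1 + 6*1**2 = 40 at (x, y) = (2, 1)
    assert poly(a, 2, 1, order=2) == 40
    print('poly() ordering check passed')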
def polyfit(u, x, y, order):
"""Fit polynomial to a set of u values on an x,y grid
u is a function u(x,y) being a polynomial of the form
u = a[i, j] x**(i-j) y**j. x and y can be on a grid or be arbitrary values"""
# First set up x and y powers for each coefficient
px = []
py = []
for i in range(order + 1):
for j in range(i + 1):
px.append(i - j)
py.append(j)
terms = len(px)
# print terms, ' terms for order ', order
# print px
# print py
# Make up matrix and vector
vector = sp.zeros((terms))
mat = sp.zeros((terms, terms))
for i in range(terms):
vector[i] = (u * x ** px[i] * y ** py[i]).sum()
for j in range(terms):
mat[i, j] = (x ** px[i] * y ** py[i] * x ** px[j] * y ** py[j]).sum()
# print 'Vector', vector
# print 'Matrix'
# print mat
imat = linalg.inv(mat)
# print 'Inverse'
# print imat
# Check that inversion worked
# print sp.dot(mat,imat)
coeffs = sp.dot(imat, vector)
return coeffs
def polyfit2(u, x, y, order):
"""Fit polynomial to a set of u values on an x,y grid
u is a function u(x,y) being a polynomial of the form
u = a[i, j]x**(i-j)y**j. x and y can be on a grid or be arbitrary values
This version uses solve instead of matrix inversion"""
# First set up x and y powers for each coefficient
px = []
py = []
for i in range(order + 1):
for j in range(i + 1):
px.append(i - j)
py.append(j)
terms = len(px)
# print terms, ' terms for order ', order
# print px
# print py
# Make up matrix and vector
vector = sp.zeros((terms))
mat = sp.zeros((terms, terms))
for i in range(terms):
vector[i] = (u * x ** px[i] * y ** py[i]).sum() # Summing over all x,y
for j in range(terms):
mat[i, j] = (x ** px[i] * y ** py[i] * x ** px[j] * y ** py[j]).sum()
coeffs = linalg.solve(mat, vector)
return coeffs
def reorder(A, B, verbose=False) :
"""Reorder Sabatke coefficients to Cox convention"""
order = 5
terms = (order+1)*(order+2)//2
Aarray = sp.zeros((order+1,order+1))
Barray = sp.zeros((order+1,order+1))
k1 = 0
for i in range(order+1):
for j in range(order+1-i):
Aarray[j,i] = A[k1]
Barray[j,i] = B[k1]
k1 += 1
A2 = sp.zeros((terms))
B2 = sp.zeros((terms))
k2 = 0
for i in range(order+1):
for j in range(i+1):
A2[k2] = Aarray[j,i-j]
B2[k2] = Barray[j,i-j]
k2 += 1
if verbose:
print('A')
triangle(A2, order)
print('\nB')
triangle(B2, order)
return (A2, B2)
def rescale(A, B, C, D, order, scale):
"""
Change coefficients to arcsec scale
Ported here from makeSIAF.py
J. Sahlmann 2018-01-03
J. Sahlmann 2018-01-04: fixed side-effect on ABCD variables
:param A:
:param B:
:param C:
:param D:
:param order:
:param scale:
:return:
"""
A_scaled = scale*A
B_scaled = scale*B
number_of_coefficients = np.int((order + 1) * (order + 2) / 2)
C_scaled = np.zeros(number_of_coefficients)
D_scaled = np.zeros(number_of_coefficients)
k = 0
for i in range(order+1):
factor = scale**i
for j in range(i+1):
C_scaled[k] = C[k]/factor
D_scaled[k] = D[k]/factor
k += 1
return A_scaled, B_scaled, C_scaled, D_scaled
def Rotate(A,B,theta):
"""
Ported to here from makeSIAF.py
J. Sahlmann 2018-01-03
:param A:
:param B:
:param theta:
:return:
"""
A2 = A*np.cos(theta) + B*np.sin(theta)
B2 = - A*np.sin(theta) + B*np.cos(theta)
return (A2,B2)
def rotate_coefficients(A, B, angle_deg):
""" J. Sahlmann: this version of rotate_coeffs is used in nircam_get_polynomial_both
:param A:
:param B:
:param angle_deg:
:return:
"""
AR = A * np.cos(np.deg2rad(angle_deg)) - B * np.sin(np.deg2rad(angle_deg))
BR = A * np.sin(np.deg2rad(angle_deg)) + B * np.cos(np.deg2rad(angle_deg))
return AR, BR
def RotateCoeffs(a, theta, order=4, verbose=False):
"""Rotate axes of coefficients by theta degrees"""
c = np.cos(np.deg2rad(theta))
s = np.sin(np.deg2rad(theta))
# First place in triangular layout
at = sp.zeros([order+1,order+1])
k = 0
for m in range(order+1):
for n in range(m+1):
at[m, n] = a[k]
k+=1
# Apply rotation
atrotate = sp.zeros([order+1,order+1])
arotate = sp.zeros([len(a)]) # Copy shape of a
for m in range(order+1):
for n in range(m+1):
for mu in range(0,m-n+1):
for j in range(m-n-mu, m-mu+1):
factor = (-1)**(m-n-mu)*choose(m-j, mu)*choose(j, m-n-mu)
cosSin = c**(j+2*mu-m+n)*s**(2*m-2*mu-j-n)
atrotate[m, n] = atrotate[m, n] + factor*cosSin*at[m, j]
if verbose: print(m, n, j, factor, 'cos^', j+2*mu-m+n, 'sin^',2*m-2*mu-j-n, ' A',m, j)
# Put back in linear layout
k = 0
for m in range(order+1):
for n in range(m+1):
arotate[k] = atrotate[m, n]
k+=1
return arotate
def ShiftCoeffs(a, xshift, yshift, order=4, verbose=False):
"""Calculate coefficients of polynomial when shifted to new origin"""
# First place in triangular layout
at = sp.zeros([order + 1, order + 1])
atshift = sp.zeros([order + 1, order + 1])
ashift = sp.zeros([len(a)]) # Copy shape of a
k = 0
for p in range(order + 1):
for q in range(p + 1):
at[p, q] = a[k]
k += 1
# Apply shift
for p in range(order + 1):
for q in range(p + 1):
if verbose:
print("A'%1d%1d" % (p, q))
for i in range(p, order + 1):
for j in range(q, i + 1 - (p - q)):
f = choose(j, q) * choose(i - j, p - q)
atshift[p, q] = atshift[p, q] + f * xshift ** ((i - j) - (p - q)) * yshift ** (
j - q) * at[i, j]
if verbose:
print('%2d A(%1d,%1d) x^%1d y^%1d' % (f, i, j, i - j - (p - q), (j - q)))
if verbose:
print()
# Put back in linear layout
k = 0
for p in range(order + 1):
for q in range(p + 1):
ashift[k] = atshift[p, q]
k += 1
return ashift
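# Added illustrative check (not part of the original module): shifting the
# coefficients and evaluating at (x, y) should agree with evaluating the
# original polynomial at (x + xshift, y + yshift).
def testshiftcoeffs():
    order = 3
    terms = (order + 1) * (order + 2) // 2
    a = np.linspace(0.5, 1.5, terms)
    xshift, yshift = 2.0, -1.0
    ashift = ShiftCoeffs(a, xshift, yshift, order)
    x, y = 1.3, 0.7
    diff = poly(ashift, x, y, order) - poly(a, x + xshift, y + yshift, order)
    assert abs(diff) < 1.0e-10
    print('ShiftCoeffs consistency check passed')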
def testpoly():
[x, y] = sp.mgrid[0:10, 0:10]
# print 'X'
# print x
# print 'Y'
# print y
u = sp.zeros((10, 10))
v = sp.zeros((10, 10))
# Random polynomials
a0 = sp.random.rand(1)
a1 = 0.1 * (sp.random.rand(2) - 0.5)
a2 = 0.01 * (sp.random.rand(3) - 0.5)
a = sp.concatenate((a0, a1))
a = sp.concatenate((a, a2))
a[2] = 0.01 * a[2]
print('A coefficients')
print(a)
b0 = sp.random.rand(1)
b1 = 0.1 * (sp.random.rand(2) - 0.5)
b2 = 0.01 * (sp.random.rand(3) - 0.5)
b = sp.concatenate((b0, b1))
b = sp.concatenate((b, b2))
b[1] = 0.01 * b[1]
    print('B coefficients')
print(b)
for i in range(10):
for j in range(10):
u[i, j] = poly(a, x[i, j], y[i, j], 2) # + sp.random.normal(0.0, 0.01)
v[i, j] = poly(b, x[i, j], y[i, j], 2) # + sp.random.normal(0.0,0.01)
# print z
    s1 = polyfit2(u, x, y, 2)
    s2 = polyfit2(v, x, y, 2)
print('S1', s1)
print('S2', s2)
uc = poly(s1, x, y, 2)
vc = poly(s2, x, y, 2)
pl.figure(1)
pl.clf()
pl.grid(True)
pl.plot(u, v, 'gx')
pl.plot(uc, vc, 'r+')
def TransCoeffs(A, a, b, c, d, order=4, verbose=False):
"""Transform polynomial coefficients to allow for
xp = a*x + b*y
yp = c*x + d*y"""
A1 = sp.zeros((order + 1, order + 1))
A2 = sp.zeros((order + 1, order + 1))
ncoeffs = (order + 1) * (order + 2) // 2
if verbose:
print(ncoeffs, 'coefficients for order', order)
AT = sp.zeros((ncoeffs))
# First place A in triangular layout
k = 0
for i in range(order + 1):
for j in range(i + 1):
A1[i, j] = A[k]
k += 1
for m in range(order + 1):
for n in range(m + 1):
if verbose:
print('\nM,N', m, n)
for mu in range(m - n + 1):
for j in range(m - n - mu, m - mu + 1):
if verbose:
print('J, MU', j, mu)
if verbose:
print('Choose', m - j, mu, 'and', j, m - n - mu)
factor = choose(m - j, mu) * choose(j, m - n - mu)
A2[m, n] += factor * a ** mu * b ** (m - j - mu) * c ** (m - n - mu) * d ** (
mu + j - m + n) * A1[m, j]
if verbose:
print(m, j, ' Factor', factor)
# Restore A2 to flat layout in AT
k = 0
for m in range(order + 1):
for n in range(m + 1):
AT[k] = A2[m, n]
k += 1
return AT
def triangle(A, order=4):
"""Print coefficients in triangular layout"""
k = 0
for i in range(order + 1):
for j in range(i + 1):
print('%12.5e' % A[k], end=' ')
k += 1
print()
def triangulate(A, order):
"""Convert linear array to 2-D array with triangular coefficient layout"""
AT = sp.zeros((order + 1, order + 1))
k = 0
for i in range(order + 1):
for j in range(i + 1):
AT[i, j] = A[k]
k += 1
return AT
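# Added illustrative check (not part of the original module): flatten() and
# triangulate() are inverse layout conversions for a given order.
def testlayouts():
    order = 3
    terms = (order + 1) * (order + 2) // 2
    A = np.arange(terms, dtype=float)
    AT = triangulate(A, order)
    assert (flatten(AT, order) == A).all()
    print('flatten/triangulate round trip OK')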
def two_step(A, B, a, b, order):
"""
Change coefficients when
xp = a[0] + a[1].x + a[2].y
yp = b[0] + b[1].x + b[2].y
:param A:
:param B:
:param a:
:param b:
:param order:
:return:
"""
terms = (order+1)*(order+2)//2
A2 = sp.zeros((order+1,order+1))
B2 = sp.zeros((order+1,order+1))
k=0
for i in range(order+1):
for j in range(i+1):
for alpha in range(i-j+1):
for beta in range(i-j-alpha+1):
f1 = choose(i-j,alpha)*choose(i-j-alpha, beta)*a[0]**(i-j-alpha-beta)*a[1]**alpha*a[2]**beta
for gamma in range(j+1):
for delta in range(j-gamma+1):
f2 = choose(j,gamma)*choose(j-gamma,delta)*b[0]**(j-gamma-delta)*b[1]**gamma*b[2]**delta
A2[alpha+beta+gamma+delta, beta+delta] += A[k]*f1*f2
B2[alpha+beta+gamma+delta, beta+delta] += B[k]*f1*f2
k += 1
# Flatten A@ and B2
k = 0
Aflat = sp.zeros(terms)
Bflat = sp.zeros(terms)
for i in range(order+1):
for j in range(i+1):
Aflat[k] = A2[i, j]
Bflat[k] = B2[i, j]
k += 1
return (Aflat, Bflat)
# def TestTwoStep():
# A = sp.array([10.0, 2.0, 0.1, 0.01, -0.02, 0.03])
# B = sp.array([4.0, 1.8, 0.2, 0.02, 0.03, -0.02])
# a = sp.array([1.0, 0.5, 0.1])
# b = sp.array([2.0, 0.2, 0.6])
# print('\nA')
# triangle(A,2)
# print('B')
# triangle(B,2)
# print('a\n',a)
# print('b\n', b)
# (A2, B2) = TwoStep(A,B,a, b,2)
# print('\nA2')
# triangle(A2,2)
# print('B2')
# triangle(B2,2)
#
# # Now do a test calculation
# (x,y) = (10,5)
# xp = a[0] + a[1]*x + a[2]*y
# yp = b[0] + b[1]*x + b[2]*y
# print('x,y', x,y)
# print('xp,yp', xp,yp)
#
# u = poly(A, xp, yp, 2)
# v = poly(B, xp, yp, 2)
# up = poly(A2, x, y,2)
# vp = poly(B2, x, y,2)
# print('Two step', u, v)
# print('One step', up, vp)
# return
|
the-stack_0_26890
|
import numpy as np
import pandas as pd
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
from numpy import random
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from app.http.api.dbquery import doInsertRequest
def pre_processing(inputStr):
data = pd.read_csv('./Symptom_severity_training_data_updated.csv')
STOPWORDS = (stopwords.words('english'))
STOPWORDS.remove('not')
STOPWORDS.remove('very')
STOPWORDS.append('need')
STOPWORDS.append('want')
STOPWORDS.append('feel')
lemmatizer = WordNetLemmatizer()
def nltk_tag_to_wordnet_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return None
def lemmatize_sentence(sentence):
nltk_tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)
lemmatized_sentence = []
for word, tag in wordnet_tagged:
if tag is None:
lemmatized_sentence.append(word)
else:
lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))
return " ".join(lemmatized_sentence)
def clean_text(text):
"""
text: a string
return: modified initial string
"""
text = text.lower()
text = lemmatize_sentence(text)
text =' '.join(word for word in text.split() if word not in STOPWORDS)
return text
data['Symptom']=data['Symptom'].apply(clean_text)
X = data.Symptom
Y_classification = data.weight
X_train, X_test, y_train, y_test = train_test_split(X, Y_classification, test_size=0.2, random_state=2)
nb=Pipeline([('Vect', CountVectorizer(ngram_range=([1,3]))),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
nb.fit(X_train, y_train)
sgd =Pipeline([('Vect', CountVectorizer(ngram_range=([1,3]))),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', random_state=2, max_iter=5, tol=None)),
])
sgd.fit(X_train, y_train)
logreg =Pipeline([('Vect', CountVectorizer(ngram_range=([1,3]))),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(C=1e5)),
])
logreg.fit(X_train, y_train)
dt =Pipeline([('Vect', CountVectorizer(ngram_range=([1,3]))),
('tfidf', TfidfTransformer()),
('clf', DecisionTreeClassifier(criterion='gini',random_state=2)),
])
dt.fit(X_train, y_train)
knn =Pipeline([('Vect', CountVectorizer(ngram_range=([1,3]))),
('tfidf', TfidfTransformer()),
('clf', KNeighborsClassifier(n_neighbors=5)),
])
knn.fit(X_train, y_train)
input = clean_text(inputStr)
result1=nb.predict([input])
result2=sgd.predict([input])
result3=logreg.predict([input])
result4=dt.predict([input])
result5=knn.predict([input])
def most_frequent(List):
counter=0
num=0
for i in List:
curr_freq=List.count(i)
if(curr_freq>counter):
counter=curr_freq
num=i
return num
result=[result1, result2, result3, result4, result5]
finalresult=' '.join([str(elem) for elem in most_frequent(result)])
print(result, finalresult)
random_roomno=random.randint(1,20)
request_args = {'req_src_room':str(random_roomno), 'req_message':input, 'req_class':int(finalresult)}
return doInsertRequest(**request_args)
|
the-stack_0_26891
|
import numpy as np
import easyvvuq as uq
import os
import fabsim3_cmd_api as fab
import matplotlib.pyplot as plt
from scipy import stats
def get_kde(X, Npoints = 100):
kernel = stats.gaussian_kde(X)
x = np.linspace(np.min(X), np.max(X), Npoints)
pde = kernel.evaluate(x)
return x, pde
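# Added illustrative sketch (not part of the original script): typical use of
# get_kde on a 1D sample array; the samples here are synthetic.
def plot_kde_example(n_samples=1000):
    samples = np.random.normal(loc=0.0, scale=1.0, size=n_samples)
    x, pde = get_kde(samples)
    plt.plot(x, pde, label='kde of %d synthetic samples' % n_samples)
    plt.legend()
    plt.show()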
#post processing of UQ samples executed via FabSim. All samples must have been completed
#before this subroutine is executed. Use 'fabsim <machine_name> job_stat' to check their status
def post_proc(state_file, work_dir):
#Reload the campaign
my_campaign = uq.Campaign(state_file = state_file, work_dir = work_dir)
print('========================================================')
print('Reloaded campaign', my_campaign.campaign_dir.split('/')[-1])
print('========================================================')
#get sampler and output columns from my_campaign object
my_sampler = my_campaign._active_sampler
output_columns = my_campaign._active_app_decoder.output_columns
#copy the samples back to EasyVVUQ dir
fab.fetch_results()
#copy the results back to the EasyVVUQ Campaign directory
fab.get_uq_samples('ocean', my_campaign.campaign_dir, my_sampler._number_of_samples)
#collate output
my_campaign.collate()
# Post-processing analysis
sc_analysis = uq.analysis.SCAnalysis(sampler=my_sampler, qoi_cols=output_columns)
my_campaign.apply_analysis(sc_analysis)
results = my_campaign.get_last_analysis()
return results, sc_analysis, my_sampler, my_campaign
if __name__ == "__main__":
#home dir of this file
HOME = os.path.abspath(os.path.dirname(__file__))
work_dir = "/tmp"
results, sc_analysis, my_sampler, my_campaign = post_proc(state_file="campaign_state_test.json", work_dir = work_dir)
print('========================================================')
print('First order Sobol indices Energy:')
print(results['sobols_first']['E_mean'])
for param in my_sampler.vary.get_keys():
print('Parameter', param, 'accounts for', np.around(results['sobols_first']['E_mean'][param], 2)[0]*100, '% of the total variance of the energy.')
print('========================================================')
#################################
# Use SC expansion as surrogate #
#################################
#number of MC samples
n_mc = 50000
fig = plt.figure()
ax = fig.add_subplot(111, xlabel=r'$Energy$', yticks = [])
#get the input distributions
theta = my_sampler.vary.get_values()
xi = np.zeros([n_mc, 2])
idx = 0
#draw random sampler from the input distributions
for theta_i in theta:
xi[:, idx] = theta_i.sample(n_mc)
idx += 1
#evaluate the surrogate at the random values
Q = 'E_mean'
qoi = np.zeros(n_mc)
for i in range(n_mc):
qoi[i] = sc_analysis.surrogate(Q, xi[i])
#plot kernel density estimate of surrogate samples
x, kde = get_kde(qoi)
plt.plot(x, kde, label=r'$\mathrm{Energy\;pdf}$')
plt.legend()
plt.tight_layout()
plt.show()
|
the-stack_0_26893
|
import os
import re
from setuptools import find_packages, setup
ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r"""__version__ = ['"]([0-9.]+)['"]""")
requires = [
"boto3>=1.16.63",
"cryptography>=3.3.1",
]
def get_version():
"""Reads the version from this module."""
init = open(os.path.join(ROOT, "s3_encryption_sdk", "__init__.py")).read()
return VERSION_RE.search(init).group(1)
setup(
name="s3-encryption-sdk",
version=get_version(),
description="S3 Encryption Client for Python",
long_description=open("README.rst").read(),
keywords="aws s3 kms client-side-encryption",
author="hupe1980",
url="https://github.com/hupe1980/aws-s3-encryption-python",
packages=find_packages(exclude=["tests*"]),
install_requires=requires,
data_files=["README.rst", "LICENSE"],
license="MIT",
python_requires=">= 3.6",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Security",
"Topic :: Security :: Cryptography",
],
)
|
the-stack_0_26895
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <[email protected]>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import re
import os
from jittor_utils import LOG, run_cmd, simple_timer
import json
from collections import OrderedDict
import glob
def parse_attrs(s):
'''parse @attrs(..., x=y) syntax'''
attrs = {}
if s is None: return attrs
for a in s.split(','):
a = a.strip()
if len(a)==0: continue
if '=' in a:
k, v = a.split('=')
attrs[k] = v
else:
attrs[a] = 1
return attrs
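# Illustrative sketch (not part of the original source): expected behaviour of
# parse_attrs on a typical @attrs(...) payload.
def _parse_attrs_example():
    attrs = parse_attrs("core_name=jittor_core, submodule")
    assert attrs == {"core_name": "jittor_core", "submodule": 1}
    return attrs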
pytype_map = {
"const char*": ["PyUnicode_AsUTF8", "PyUnicode_FromString", "PyUnicode_CheckExact"],
"int": ["PyLong_AsLong", "PyLong_FromLong", "PyLong_CheckExact"],
"int64": ["PyLong_AsLongLong", "PyLong_FromLongLong", "PyLong_CheckExact"],
"uint": ["PyLong_AsUnsignedLong", "PyLong_FromUnsignedLong", "PyLong_CheckExact"],
"uint64": ["PyLong_AsUnsignedLongLong", "PyLong_FromUnsignedLongLong", "PyLong_CheckExact"],
"void": ["...", "GET_PY_NONE", "..."],
"PyObject*": ["","",""],
}
def get_pytype_map(T, i):
assert T != ""
if T in pytype_map:
return pytype_map[T][i]
return ["from_py_object", "to_py_object", "is_type"][i]+"<"+T+">"
binary_number_slots = {
"__add__": "nb_add",
"__sub__": "nb_subtract",
"__mul__": "nb_multiply",
"__mod__": "nb_remainder",
"__divmod__": "nb_divmod",
"__pow__": "nb_power",
"__lshift__": "nb_lshift",
"__rshift__": "nb_rshift",
"__and__": "nb_and",
"__xor__": "nb_xor",
"__or__": "nb_or",
"__floordiv__": "nb_floor_divide",
"__truediv__": "nb_true_divide",
"__matmul__": "nb_matrix_multiply",
}
for k,v in list(binary_number_slots.items()):
# __add__: nb_add ----> __iadd: nb_inplace_add
binary_number_slots["__i"+k[2:]] = "nb_inplace"+v[2:]
unary_number_slots = {
"__neg__": "nb_negative",
"__abs__": "nb_absolute",
}
def split_args(s):
# split args xxx,xxx, xx<xx,xx>, xx
s = s.strip()
if s=="": return []
prev = -1
presum = 0
args = []
for i in range(len(s)):
if s[i]=='<':
presum += 1
elif s[i]=='>':
presum -= 1
if presum==0 and s[i]==',':
args.append(s[prev+1:i])
prev = i
args.append(s[prev+1:])
return args
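# Illustrative sketch (not part of the original source): split_args only splits
# on commas that sit outside template angle brackets.
def _split_args_example():
    parts = split_args("int a, map<int,int> b, float c")
    assert [p.strip() for p in parts] == ["int a", "map<int,int> b", "float c"]
    return parts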
def get_def_code(df, scope_name, pyname, self_as_arg0=False):
is_fast_call = not pyname.startswith("__")
no_need_convert = pyname == "__getitem__"
args = df["args"]
# n==1 && PyXXX__CheckExact(args[0]) && ...
max_args = len(args)
min_args = max_args
for tid, a in enumerate(args):
if a[2] != "":
min_args = tid
break
arg_names = [ f"args[{i}]" for i in range(len(args))]
if self_as_arg0:
max_args -= 1
min_args -= 1
arg_names = ["self"] + arg_names[:-1]
kw_args_id = []
for aid, arg in enumerate(args):
if "VarHolder*" != arg[0] and is_fast_call:
kw_args_id.append(aid)
func_quick_check_runable = ""
func_quick_check_size = f"n<={max_args} && n>={min_args}"
if len(kw_args_id):
func_quick_check_size = f"n+(kw?Py_SIZE(kw):0)<={max_args} && n+(kw?Py_SIZE(kw):0)>={min_args}"
fill_with_default = ""
func_args_convert = ""
func_call = df["func_name"]+"("
pytypes = [ get_pytype_map(a[0],0) for a in args ]
holder_dec_array = []
holder_set_array = []
for tid, tpc in enumerate(pytypes):
check = get_pytype_map(args[tid][0],2)
default_arg = args[tid][2]
jtp = args[tid][0]
holder_dec = ""
holder_set = ""
if jtp == "VarHolder*":
holder_dec = f"unique_ptr<VarHolder> arg{tid}_holder"
holder_set = f", arg{tid}_holder"
if jtp == "VarSlices":
holder_dec = f"vector<unique_ptr<VarHolder>> arg{tid}_holder"
holder_set = f", arg{tid}_holder"
holder_dec_array.append(holder_dec)
holder_set_array.append(holder_set)
if len(default_arg):
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid};
if (n>{tid-self_as_arg0}) {{
CHECK(({check}({arg_names[tid]})));
arg{tid} = {tpc}({arg_names[tid]}{holder_set});
arg_filled |= 1ull << {tid};
}}
"""
fill_with_default += f"""
if (!(arg_filled & (1ull<<{tid}))) {{
arg{tid} = {default_arg};
}}
"""
else:
func_quick_check_runable += f" && {check}({arg_names[tid]})"
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid} = {tpc}({arg_names[tid]}{holder_set});
"""
if tid: func_call += ","
if args[tid][3].endswith("&&"):
func_call += f"move(arg{tid})"
else:
func_call += f"arg{tid}"
if pyname == "__richcmp__":
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if rname in df["attrs"]:
func_quick_check_runable += " && op==Py_"+rname[2:-2].upper()
# fill args with keyword arguments
fill_with_kw = ""
if is_fast_call and len(kw_args_id):
fill_with_kw = f"""
if (kw) {{
auto kw_n = Py_SIZE(kw);
for (int i=0; i<kw_n; i++) {{
auto ko = PyTuple_GET_ITEM(kw, i);
auto vo = args[i+n];
auto ks = PyUnicode_AsUTF8(ko);
uint khash = hash(ks);
{"".join([
f'''
if (khash == {get_hash(args[aid][1])}u) {{
// hash match {args[aid][1]}
CHECK(({get_pytype_map(args[aid][0],2)}(vo)));
arg{aid} = {pytypes[aid]}(vo{holder_set_array[aid]});
arg_filled |= 1ull << {aid};
continue;
}}
'''
for aid in kw_args_id
])}
LOGf << "Not a valid keyword:" << ks;
}}
}}
"""
if len(args):
func_args_convert += """
CHECK(!PyErr_Occurred());
"""
func_call += ")"
if df["is_property"]:
if pyname.startswith("__get__"):
func_call = df["func_name"]
else:
assert pyname.startswith("__set__"), pyname
func_call = df["func_name"] + "= arg0"
has_return = df["return_t"]!="void" and df["return_t"]!=""
# add XXX::xxx or XXX->xxx if is class def
if df["is_scope_def"]:
if df["is_static"]:
func_call = f"{scope_name}::" + func_call
else:
func_call = f"(GET_RAW_PTR({scope_name},self))->" + func_call
if pyname == "__init__":
# XXX->xxx(...) ---> new XXX xxx(...)
assert "->" in func_call, func_call
func_call = "new " + func_call.replace("->", " ")
if no_need_convert:
func_quick_check_runable = ""
func_args_convert = ""
fill_with_kw = fill_with_default = ""
return (
func_quick_check_size + func_quick_check_runable,
func_args_convert,
fill_with_kw+fill_with_default,
func_call,
has_return
)
hash_to_key_map = {}
def get_hash(s):
mask = (1<<32)-1
v=0
mul = 1
for c in s:
v += mul * ord(c)
mul *= 55
v &= mask
mul &= mask
if v in hash_to_key_map:
assert hash_to_key_map[v] == s, \
f"hash conflict {hash_to_key_map[v]} {s} {hash_to_key_map}"
hash_to_key_map[v] = s
return v
reg = re.compile(
'(/\\*(.*?)\\*/\\s*)?(//\\s*@pyjt\\(([^\\n]*)\\)\\s*)'
# ^^^^^^^^^^^^^^^^^ ^^^^ ^^^^
# doc string $1 pyjt args $3
+
'(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?'
# ^^^^^ ^^^^^^^
# attrs args $5
, re.DOTALL)
def generate_error_code_from_func_header(func_head, target_scope_name, name, dfs, basename, h, class_info):
# func_head is a string like:
# (PyObject* self, PyObject** args, int64 n, PyObject* kw) -> PyObject*
lib_name = os.path.basename(h).split("_")[0]
# TODO: fix/add var help
if target_scope_name == "Var": target_scope_name = None
if target_scope_name:
if target_scope_name == "flags":
help_name = "flags"
else:
help_name = ""+target_scope_name+'.'+name
else:
help_name = name
if lib_name in ["mpi", "nccl", "cudnn", "curand", "cublas", "mkl"]:
help_name = lib_name+'.'+help_name
help_cmd = f"help(jt.{help_name})"
LOG.vvv("gen err from func_head", func_head)
args = func_head[1:].split(")")[0].split(",")
error_code = f" << \"Wrong inputs arguments, Please refer to examples({help_cmd}).\""
error_code += r' << "\n\nTypes of your inputs are:\n"'
for arg in args:
arg = arg.strip()
if arg.startswith("PyObject* "):
t, n = arg.split(' ')
if n == "args" or n == "_args":
error_code += f" << PyTupleArgPrinter{{{n}, \"args\"}} "
elif n == "kw":
error_code += f" << PyKwArgPrinter{{{n}}} "
else:
error_code += f" << PyArgPrinter{{{n}, \"{n}\"}} "
elif arg.startswith("PyObject** "):
t, n = arg.split(' ')
error_code += f" << PyFastCallArgPrinter{{{n}, n, kw}} "
break
else:
LOG.vvv("Unhandled arg", arg)
LOG.vvv("gen err from func_head", func_head, " -> ", error_code)
return error_code
def compile_src(src, h, basename):
res = list(reg.finditer(src, re.S))
if len(res)==0: return
class_ranges = None
class_name = None
class_info = None
submodule_name = None
submodule_ranges = None
submodule_info = None
defs = []
LOG.vv(("find in", h))
for x in res:
LOG.vvv((x, x.groups()))
g = x.groups()
doc = g[1]
pyjt = g[3]
attrs = g[5]
esplit = lambda x: [] if x==None else \
[ a.strip() for a in x.split(",") if len(a.strip()) ]
attrs = parse_attrs(attrs)
pynames = esplit(pyjt)
end = x.end()
def find_bc(i):
while src[i] not in "({;":
i += 1
j = i+1
if src[i]==';':
return i, j
presum = 1
while True:
if src[j] in "({[":
presum += 1
elif src[j] in ")}]":
presum -= 1
if presum==0:
s = src[i]+src[j]
assert s in ("()","{}","()"), "braces not match "+s
return i, j
j += 1
# // @pyjt(DType)
# struct DType {
# ^ --> a
# .....
# } <--- b
# or
# // @pyjt(hash)
# inline uint hash(const char* input)
# ^ --> a ^ --> b
a, b = find_bc(end)
is_property = 0
if src[a] == ';':
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
is_property = 1
if src[a] == '{':
assert len(pynames)==1
if "submodule" in attrs:
assert submodule_ranges==None
submodule_ranges = (a, b)
submodule_name = src[end:a-1].strip().split()[-1]
submodule_info = {
"pynames": pynames,
"attrs": attrs
}
continue
assert class_ranges==None
class_ranges = (a, b)
class_name = src[end:a-1].strip().split()[-1]
class_info = {
"pynames": pynames,
"attrs": attrs
}
continue
is_scope_def = False
is_static = False
scope_name = ""
if class_ranges != None:
if class_ranges[0] < a and a < class_ranges[1]:
is_scope_def = True
scope_name = class_name
if submodule_ranges != None:
if submodule_ranges[0] < a and a < submodule_ranges[1]:
is_scope_def = True
scope_name = submodule_name
is_static = True
dec = src[end:b+1].strip()
arr = src[end:a].strip().split()
func_name = arr[-1]
is_constructor = False
if is_scope_def and func_name==class_name:
is_constructor = True
args = []
for arg in split_args(src[a+1:b]):
if arg=="": continue
default = ""
if "=" in arg:
arg, default = arg.split('=')
default = default
arg = arg.strip()
name = arg.split(' ')[-1]
tp = arg[:-len(name)]
tp = tp.strip()
prev_tp = tp
# const string& ----> string
if tp.startswith("const") and tp.endswith("&"):
tp = tp[5:-1].strip()
# T&& -> T
if tp.endswith("&&"):
tp = tp[:-2].strip()
# ArrayArgs& -> ArrayArgs
if tp.endswith("&"):
tp = tp[:-1].strip()
args.append((tp, name.strip(), default.strip(), prev_tp))
return_t = ""
for a in arr[:-1]:
if a in ["", "inline", "constexpr"]: continue
if a == "static":
is_static = True
continue
if return_t != "": return_t += " "
return_t += a
if is_scope_def and class_info and "submodule" in class_info["attrs"]:
is_static = True
for pid, pyname in enumerate(pynames):
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if pyname.endswith(rname):
attrs[rname] = 1
pynames[pid] = pyname.replace(rname, "__richcmp__")
def_info = {
"is_scope_def": is_scope_def,
"is_constructor": is_constructor,
"is_static": is_static,
"is_property": is_property,
"func_name": func_name,
"args": args, # [(type,name,defaut), ...]
"return_t": return_t, # return type
"dec": dec, # full string of xxx(A a, B b)
"pynames": pynames, # names in @pyjt(...)
"attrs": attrs, # attrs in @attrs(...)
"doc": doc,
"scope_name": scope_name,
}
if is_property:
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
assert is_scope_def and not is_static
def_info["is_property"] = 1
def_info["pynames"] = ["__get__"+n for n in pynames]
assert return_t != "void"
defs.append(dict(def_info))
def_info["pynames"] = ["__set__"+n for n in pynames]
assert len(args) == 0
def_info["args"] = [(def_info["return_t"], func_name, "", "")]
def_info["return_t"] = "void"
defs.append(dict(def_info))
continue
else:
defs.append(def_info)
LOG.vvv(lambda: json.dumps(def_info, indent=4))
# deal with defs
if len(defs) == 0: return
# include_name = h[4:] # remove "src/" prefix
include_name = h
code = []
class_defs_code = []
class_getsets_code = []
class_gets = OrderedDict()
class_sets = OrderedDict()
class_slots_code = []
submodule_defs_code = []
def_targets = OrderedDict()
for df in defs:
for name in df["pynames"]:
if df["is_scope_def"] and '.' not in name:
if df["scope_name"] == class_name:
name = class_info["pynames"][0] + '.' + name
else:
name = submodule_info["pynames"][0] + '.' + name
if name not in def_targets:
def_targets[name] = []
def_targets[name].append(df)
for name in def_targets:
dfs = def_targets[name]
target_scope_name = None
LOG.vv(name)
if "." in name:
target_scope_name, name = name.split(".")
# array for each df:
arr_func_quick_check_runable = []
arr_func_args_convert = []
arr_fill_with_default = []
arr_func_call = []
arr_has_return = []
self_as_arg0 = False
for df in dfs:
self_as_arg0 = class_info and \
target_scope_name == class_info["pynames"][0] and \
df["scope_name"] == submodule_name \
and not name.startswith("__")
res = get_def_code(df, df["scope_name"], name, bool(self_as_arg0))
arr_func_quick_check_runable.append(res[0])
arr_func_args_convert.append(res[1])
arr_fill_with_default.append(res[2])
arr_func_call.append(res[3])
arr_has_return.append(res[4])
slot_name = None
func_cast = ""
func_fill = ""
before_return = ""
if name == "__init__":
slot_name = "tp_init"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> int"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__repr__":
slot_name = "tp_repr"
func_head = "(PyObject* self) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__get__"):
slot_name = "tp_gets"
name = name[len("__get__"):]
func_head = "(PyObject* self, void*) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__set__"):
slot_name = "tp_sets"
name = name[len("__set__"):]
func_head = "(PyObject* self, PyObject* arg, void*) -> int"
func_fill = """
int64 n=1;
PyObject** args = &arg;
(void)n, (void)args;
"""
elif name == "__call__":
slot_name = "tp_call"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> PyObject*"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__dealloc__":
slot_name = "tp_dealloc"
func_head = "(PyObject* self) -> void"
func_fill = "int64 n = 0"
before_return = "Py_TYPE(self)->tp_free((PyObject *) self);"
elif name in binary_number_slots:
slot_name = "tp_as_number->"+binary_number_slots[name]
func_head = "(PyObject* self, PyObject* b) -> PyObject*"
if name.endswith("pow__"):
func_head = "(PyObject* self, PyObject* b, PyObject*) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name in unary_number_slots:
slot_name = "tp_as_number->"+unary_number_slots[name]
func_head = "(PyObject* self) -> PyObject*"
func_fill = """
int64 n = 1;
PyObject* args[] = {self};
(void)n, (void)args;
"""
elif name == "__richcmp__":
slot_name = "tp_richcompare"
func_head = "(PyObject* self, PyObject* b, int op) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name == "__len__":
slot_name = "tp_as_sequence->sq_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__map_len__":
slot_name = "tp_as_mapping->mp_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__getitem__":
slot_name = "tp_as_sequence->sq_item"
func_head = "(PyObject* self, Py_ssize_t arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
(void)n;
if (arg0 >= GET_RAW_PTR({dfs[0]["scope_name"]},self)->size()) {{
PyErr_SetString(PyExc_IndexError, "");
return (PyObject*)nullptr;
}}
"""
elif name == "__map_getitem__":
slot_name = "tp_as_mapping->mp_subscript"
func_head = "(PyObject* self, PyObject* arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
PyObject* args[] = {{arg0}};
(void)n;
"""
elif name.startswith("__"):
LOG.f(f"Not support slot {name}")
continue
else:
func_head = "(PyObject* self, PyObject** args, int64 n, PyObject* kw) -> PyObject*"
func_cast = f"(PyCFunction)(PyObject* (*)(PyObject*,PyObject**,int64,PyObject*))"
# if not return, return py_none
arr_has_return = [ True for _ in arr_has_return ]
arr_func_return = []
doc_all = ""
decs = "The function declarations are:\n"
for did, has_return in enumerate(arr_has_return):
df = dfs[did]
func_call = arr_func_call[did]
if df["doc"] and not (did > 0 and df["doc"] == dfs[did - 1]["doc"]):
doc_all += "Document:\n"
doc_all += df["doc"]+'\n'
doc_all += "Declaration:\n"
doc_all += df["dec"]+'\n\n'
decs += " " + df["dec"]+'\n'
if has_return:
assert "-> int" not in func_head
if "-> PyObject*" in func_head:
if "return_self" in df["attrs"]:
arr_func_return.append(
f"return (({func_call}), Py_INCREF(self), self)")
else:
arr_func_return.append(
f"return {get_pytype_map(df['return_t'],1)}(({func_call}))")
func_return_failed = "return nullptr"
else:
arr_func_return.append(
f"return ({func_call});")
func_return_failed = "return -1"
else:
if "-> int" in func_head:
arr_func_return.append(f"return ({func_call},0)")
func_return_failed = "return -1"
else:
assert "-> void" in func_head, func_head
arr_func_return.append(f"{func_call};{before_return}return")
func_return_failed = "return"
# generate error msg when not a valid call
error_log_code = generate_error_code_from_func_header(func_head, target_scope_name, name, dfs, basename ,h, class_info)
func = f"""
{func_cast}[]{func_head} {{
try {{
{func_fill};
uint64 arg_filled=0;
(void)arg_filled;
{"".join([f'''
if ({arr_func_quick_check_runable[did]}) {{
{arr_func_args_convert[did]};
{arr_fill_with_default[did]};
{arr_func_return[did]};
}}
'''
for did in range(len(arr_func_return))
])}
LOGf << "Not a valid call.";
}} catch (const std::exception& e) {{
if (!PyErr_Occurred()) {{
std::stringstream ss;
if (check_async_executor_error(e, ss)) {{
PyErr_Format(PyExc_RuntimeError,
"%s",
ss.str().c_str()
);
}} else {{
ss {error_log_code};
PyErr_Format(PyExc_RuntimeError,
"%s\\n%s\\nFailed reason:%s",
ss.str().c_str(),
R""({decs})"",
e.what()
);
}}
}}
}}
{func_return_failed};
}}
"""
if slot_name:
if slot_name=="tp_gets":
class_gets[name] = {
"func": func,
"doc": doc_all
}
continue
if slot_name=="tp_sets":
class_sets[name] = {
"func": func,
"doc": ""
}
continue
class_slots_code.append(f"""
tp.{slot_name} = {func};
""")
continue
need_static = ""
if df["is_scope_def"] and df["is_static"] and \
df["scope_name"] == class_name and \
"submodule" not in class_info["attrs"]:
need_static = " | METH_STATIC"
func = (f"""
{{ R""({name})"",
{func},
METH_FASTCALL | METH_KEYWORDS{need_static},
R""({doc_all})""
}}""")
if df["is_scope_def"]:
if df["scope_name"] == class_name or \
(class_info and \
target_scope_name == class_info["pynames"][0]):
class_defs_code.append(func)
else:
submodule_defs_code.append(func)
else:
code.append(func)
prop_names = list(set(class_gets.keys()).union(class_sets.keys()))
prop_names = sorted(prop_names)
for prop_name in prop_names:
get_func = "NULL"
set_func = "NULL"
doc = ""
if prop_name in class_gets:
get_func = class_gets[prop_name]["func"]
if class_gets[prop_name]["doc"]:
doc += class_gets[prop_name]["doc"]
if prop_name in class_sets:
set_func = class_sets[prop_name]["func"]
if class_sets[prop_name]["doc"]:
doc += class_sets[prop_name]["doc"]
class_getsets_code.append(f"""
{{"{prop_name}", {get_func}, {set_func}, R""({doc})""}}
""")
code.append("{0,0,0,0}")
class_defs_code.append("{0,0,0,0}")
class_getsets_code.append("{0,0,0,0}")
submodule_defs_code.append("{0,0,0,0}")
core_name = "jittor_core"
if class_info and "attrs" in class_info and "core_name" in class_info["attrs"]:
core_name = class_info["attrs"]["core_name"]
if submodule_info and "attrs" in submodule_info and "core_name" in submodule_info["attrs"]:
core_name = submodule_info["attrs"]["core_name"]
has_map = class_name in ["VarHolder", "NanoVector"]
has_seq = class_name == "NanoVector"
# add extra include to avoid compile error
src_code = ""
if include_name.endswith("var_slices.h"):
src_code += '#include "var_holder.h"\n'
src_code += f"""
#include "utils/seh.h"
#include "pyjt/py_converter.h"
#include "pyjt/py_arg_printer.h"
#include "common.h"
#include "{include_name}"
namespace jittor {{
{
"" if class_name is None else
f"PyHeapTypeObject Pyjt{class_name};" if "heaptype" in class_info["attrs"] else
f"PyTypeObject Pyjt{class_name};"
}
void pyjt_def_{basename}(PyObject* m) {{
static PyMethodDef defs[] = {{
{",".join(code)}
}};
ASSERT(PyModule_AddFunctions(m, defs)==0);
{
f'''
static PyMethodDef class_defs[] = {{
{",".join(class_defs_code)}
}};
static PyGetSetDef class_getsets[] = {{
{",".join(class_getsets_code)}
}};
static PyNumberMethods number_methods = {{0}};
{f"auto& htp =Pyjt{class_name}; auto& tp = htp.ht_type;"
if "heaptype" in class_info["attrs"] else
f"auto& tp = Pyjt{class_name};"}
tp.tp_as_number = &number_methods;
{f"static PyMappingMethods class_map_defs = {{0}};" if has_map else ""}
{f"tp.tp_as_mapping = &class_map_defs;" if has_map else ""}
{f"static PySequenceMethods class_seq_defs = {{0}};" if has_seq else ""}
{f"tp.tp_as_sequence = &class_seq_defs;" if has_seq else ""}
tp.tp_name = "{core_name}.{class_info["pynames"][0]}";
tp.tp_basicsize = GET_OBJ_SIZE({class_name});
tp.tp_new = PyType_GenericNew;
tp.tp_flags = Py_TPFLAGS_DEFAULT;
{"tp.tp_flags |= Py_TPFLAGS_HEAPTYPE; htp.ht_name = htp.ht_qualname = to_py_object<string>(tp.tp_name);"
if "heaptype" in class_info["attrs"] else ""}
tp.tp_methods = &class_defs[0];
tp.tp_getset = &class_getsets[0];
{"".join(class_slots_code)};
ASSERT(0==PyType_Ready(&tp)) << (PyErr_Print(), 0);
Py_INCREF(&tp);
ASSERT(0==PyModule_AddObject(m, "{class_info["pynames"][0]}", (PyObject*)&tp));
''' if class_name is not None else ""
}
{f'''
// sub module def
static PyMethodDef submodule_defs[] = {{
{",".join(submodule_defs_code)}
}};
auto sub = PyImport_AddModule("{core_name}.{submodule_info["pynames"][0]}");
ASSERT(PyModule_AddFunctions(sub, submodule_defs)==0);
ASSERT(sub);
ASSERT(0==PyModule_AddObject(m, "{submodule_info["pynames"][0]}", sub));
''' if submodule_name is not None else ""
}
}}
}}
"""
return src_code
def compile_single(head_file_name, src_file_name, src=None):
basename = os.path.basename(head_file_name).split(".")[0]
if src==None:
with open(head_file_name, 'r') as f:
src = f.read()
code = compile_src(src, head_file_name, basename)
if not code: return False
LOG.vvv("write to", src_file_name)
LOG.vvvv(code)
with open(src_file_name, 'w') as f:
f.write(code)
return True
def compile(cache_path, jittor_path):
headers1 = glob.glob(jittor_path+"/src/**/*.h", recursive=True)
headers2 = glob.glob(cache_path+"/gen/**/*.h", recursive=True)
headers = headers1 + headers2
basenames = []
pyjt_names = []
for h in headers:
with open(h, 'r') as f:
src = f.read()
bh = os.path.basename(h)
# jit_op_maker.h merge compile with var_holder.h
if bh == "var_holder.h": continue
if bh == "jit_op_maker.h":
with open(os.path.join(jittor_path, "src", "var_holder.h"), "r") as f:
src = f.read() + src
basename = bh.split(".")[0]
fname = "pyjt_"+basename+".cc"
fname = os.path.join(cache_path, "gen", fname)
check = compile_single(h, fname, src)
if not check: continue
basenames.append(basename)
pyjt_names.append(fname)
code = f"""
#include "pyjt/numpy.h"
#include "pyjt/py_converter.h"
#include "common.h"
namespace jittor {{
{ " ".join([f"extern void pyjt_def_{n}(PyObject* m);" for n in basenames])}
void pyjt_def_all(PyObject* m) {{
numpy_init();
{ " ".join([f"pyjt_def_{n}(m);" for n in basenames])}
}}
}}
"""
fname = os.path.join(cache_path, "gen", "pyjt_all.cc")
LOG.vvv(("write to", fname))
LOG.vvvv(code)
with open(fname, "w") as f:
f.write(code)
pyjt_names.append(fname)
return pyjt_names
|
the-stack_0_26896
|
#!/usr/bin/env python3
import os
import sqlite3
from itertools import cycle
from inspect import getsourcefile
def createDatabase(path):
'''Creates empty tables in path. Returns connection and cursor.'''
con = sqlite3.connect(path)
cur = con.cursor()
# Replace templates table
cur.execute('DROP TABLE IF EXISTS templates')
cur.execute('CREATE TABLE templates(id INTEGER PRIMARY KEY, template TEXT NOT NULL)')
# Replace names table
cur.execute('DROP TABLE IF EXISTS names')
cur.execute(
'''CREATE TABLE names(id INTEGER PRIMARY KEY,
name TEXT UNIQUE COLLATE NOCASE,
template_id REFERENCES templates(id))''')
# Replace extensions table
cur.execute('DROP TABLE IF EXISTS extensions')
cur.execute(
'''CREATE TABLE extensions(id INTEGER PRIMARY KEY,
extension TEXT UNIQUE COLLATE NOCASE,
template_id REFERENCES templates(id))''')
con.commit()
cur.close()
return con
def extractTemplateInfo(file_name):
    '''Returns a tuple (names, extensions) parsed from a template file name.'''
file_name, file_ext = file_name.split('.', 1)
names = file_name.split(',')
exts = file_ext.split('.')
return (names, exts)
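# Illustrative example (hypothetical template file name, following the
# "name1,name2.ext1.ext2" convention assumed above):
#   extractTemplateInfo('py,python.py')  ->  (['py', 'python'], ['py'])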
def addTemplate(cursor, template, names, extensions):
'''Adds a template to the cursor's database.'''
# Insert template
if template is not None:
cursor.execute('INSERT INTO templates(template) VALUES(?)', [template])
template_id = cursor.lastrowid
# Insert names
if names:
try:
cursor.executemany(
'INSERT INTO names(name, template_id) VALUES(?, ?)',
zip(names, cycle([template_id])))
except sqlite3.IntegrityError:
pass
# Insert extensions
if extensions:
try:
cursor.executemany(
'INSERT INTO extensions(extension, template_id) VALUES(?, ?)',
zip(extensions, cycle([template_id])))
except sqlite3.IntegrityError:
pass
def makeTemplates(plates_path, base_path):
'''Loads code templates from plates_path into database.'''
# Initialize database
with createDatabase(base_path) as con:
cur = con.cursor()
file_names = os.listdir(plates_path)
file_names.sort()
for file_name in file_names:
template = open(os.path.join(plates_path, file_name)).read()
names, extensions = extractTemplateInfo(file_name)
addTemplate(cur, template, names, extensions)
cur.close()
con.commit()
def main():
source_file = os.path.realpath(getsourcefile(lambda:None))
source_dir = os.path.split(source_file)[0]
plates_path = os.path.join(source_dir, 'plates')
dest_path = os.path.join(source_dir, 'boil/plates.db')
makeTemplates(plates_path, dest_path)
if __name__ == '__main__':
main()
|
the-stack_0_26897
|
# This file is part of OpenQUA.
#
# Copyright (c) 2014 Iain R. Learmonth and contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of openqua nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from urllib2 import urlopen
import csv
from repeater import Repeater
CSV_URL = "http://www.ukrepeater.net/csvcreate.php"
# Format of CSV file is:
#
# * Callsign
# * Band
# * Channel
# * TX Frequency
# * RX Frequency
# * Mode
# * Maidenhead Locator
# * Natural Language Location
# * National Grid Reference
# * Region
# * CTCSS Tone
# * Keeper Callsign
# * Latitude
# * Longitude
#
# The first line of the file is a header and should be discarded.
# The fields are separated by commas and quoted with double quotes.
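# Column indices used below (assuming the field order listed above):
#   row[0]=callsign, row[3]=TX freq, row[4]=RX freq, row[5]=mode, row[6]=locator,
#   row[7]=town, row[10]=CTCSS, row[11]=keeper, row[12]=latitude, row[13]=longitude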
f = urlopen(CSV_URL)
count = 0
data = csv.reader(f, delimiter=',', quotechar='"')
for row in data:
count += 1
if count == 1:
continue
repeater = Repeater(row[0])
repeater.tx = float(row[3])
repeater.rx = float(row[4])
if row[10] != '':
repeater.ctcss = float(row[10])
if row[5] == "AV":
repeater.mode = "FM"
if row[5] == "DSTAR":
repeater.mode = "DSTAR"
if row[5] == "DMR":
repeater.mode = "DMR"
if row[5] == "DUALMODE":
repeater.mode = "FM"
repeater.locator = row[6]
repeater.town = row[7]
repeater.keeper = row[11]
repeater.lat = row[12]
repeater.lon = row[13]
repeater.source = "http://ukrepeater.net/"
print(repeater)
repeater.update()
f.close()
|
the-stack_0_26898
|
import logging
from typing import Tuple, List, Optional
from blspy import G1Element
from clvm.casts import int_from_bytes, int_to_bytes
from bytecash.clvm.singleton import SINGLETON_LAUNCHER
from bytecash.consensus.block_rewards import calculate_pool_reward
from bytecash.consensus.coinbase import pool_parent_id
from bytecash.pools.pool_wallet_info import PoolState, LEAVING_POOL, SELF_POOLING
from bytecash.types.blockchain_format.coin import Coin
from bytecash.types.blockchain_format.program import Program, SerializedProgram
from bytecash.types.blockchain_format.sized_bytes import bytes32
from bytecash.types.coin_spend import CoinSpend
from bytecash.wallet.puzzles.load_clvm import load_clvm
from bytecash.wallet.puzzles.singleton_top_layer import puzzle_for_singleton
from bytecash.util.ints import uint32, uint64
log = logging.getLogger(__name__)
# "Full" is the outer singleton, with the inner puzzle filled in
SINGLETON_MOD = load_clvm("singleton_top_layer.clvm")
POOL_WAITING_ROOM_MOD = load_clvm("pool_waitingroom_innerpuz.clvm")
POOL_MEMBER_MOD = load_clvm("pool_member_innerpuz.clvm")
P2_SINGLETON_MOD = load_clvm("p2_singleton_or_delayed_puzhash.clvm")
POOL_OUTER_MOD = SINGLETON_MOD
POOL_MEMBER_HASH = POOL_MEMBER_MOD.get_tree_hash()
POOL_WAITING_ROOM_HASH = POOL_WAITING_ROOM_MOD.get_tree_hash()
P2_SINGLETON_HASH = P2_SINGLETON_MOD.get_tree_hash()
POOL_OUTER_MOD_HASH = POOL_OUTER_MOD.get_tree_hash()
SINGLETON_LAUNCHER_HASH = SINGLETON_LAUNCHER.get_tree_hash()
SINGLETON_MOD_HASH = POOL_OUTER_MOD_HASH
SINGLETON_MOD_HASH_HASH = Program.to(SINGLETON_MOD_HASH).get_tree_hash()
def create_waiting_room_inner_puzzle(
target_puzzle_hash: bytes32,
relative_lock_height: uint32,
owner_pubkey: G1Element,
launcher_id: bytes32,
genesis_challenge: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> Program:
pool_reward_prefix = bytes32(genesis_challenge[:16] + b"\x00" * 16)
p2_singleton_puzzle_hash: bytes32 = launcher_id_to_p2_puzzle_hash(launcher_id, delay_time, delay_ph)
return POOL_WAITING_ROOM_MOD.curry(
target_puzzle_hash, p2_singleton_puzzle_hash, bytes(owner_pubkey), pool_reward_prefix, relative_lock_height
)
def create_pooling_inner_puzzle(
target_puzzle_hash: bytes,
pool_waiting_room_inner_hash: bytes32,
owner_pubkey: G1Element,
launcher_id: bytes32,
genesis_challenge: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> Program:
pool_reward_prefix = bytes32(genesis_challenge[:16] + b"\x00" * 16)
p2_singleton_puzzle_hash: bytes32 = launcher_id_to_p2_puzzle_hash(launcher_id, delay_time, delay_ph)
return POOL_MEMBER_MOD.curry(
target_puzzle_hash,
p2_singleton_puzzle_hash,
bytes(owner_pubkey),
pool_reward_prefix,
pool_waiting_room_inner_hash,
)
def create_full_puzzle(inner_puzzle: Program, launcher_id: bytes32) -> Program:
return puzzle_for_singleton(launcher_id, inner_puzzle)
def create_p2_singleton_puzzle(
singleton_mod_hash: bytes,
launcher_id: bytes32,
seconds_delay: uint64,
delayed_puzzle_hash: bytes32,
) -> Program:
# curry params are SINGLETON_MOD_HASH LAUNCHER_ID LAUNCHER_PUZZLE_HASH SECONDS_DELAY DELAYED_PUZZLE_HASH
return P2_SINGLETON_MOD.curry(
singleton_mod_hash, launcher_id, SINGLETON_LAUNCHER_HASH, seconds_delay, delayed_puzzle_hash
)
def launcher_id_to_p2_puzzle_hash(launcher_id: bytes32, seconds_delay: uint64, delayed_puzzle_hash: bytes32) -> bytes32:
return create_p2_singleton_puzzle(
SINGLETON_MOD_HASH, launcher_id, int_to_bytes(seconds_delay), delayed_puzzle_hash
).get_tree_hash()
def get_delayed_puz_info_from_launcher_spend(coinsol: CoinSpend) -> Tuple[uint64, bytes32]:
extra_data = Program.from_bytes(bytes(coinsol.solution)).rest().rest().first()
# Extra data is (pool_state delayed_puz_info)
# Delayed puz info is (seconds delayed_puzzle_hash)
seconds: Optional[uint64] = None
delayed_puzzle_hash: Optional[bytes32] = None
for key, value in extra_data.as_python():
if key == b"t":
seconds = int_from_bytes(value)
if key == b"h":
delayed_puzzle_hash = bytes32(value)
assert seconds is not None
assert delayed_puzzle_hash is not None
return seconds, delayed_puzzle_hash
######################################
def get_template_singleton_inner_puzzle(inner_puzzle: Program):
r = inner_puzzle.uncurry()
if r is None:
return False
uncurried_inner_puzzle, args = r
return uncurried_inner_puzzle
def get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle(puzzle: Program) -> Tuple[uint64, bytes32]:
r = puzzle.uncurry()
if r is None:
return False
inner_f, args = r
singleton_mod_hash, launcher_id, launcher_puzzle_hash, seconds_delay, delayed_puzzle_hash = list(args.as_iter())
seconds_delay = uint64(seconds_delay.as_int())
return seconds_delay, delayed_puzzle_hash.as_atom()
# Verify that a puzzle is a Pool Wallet Singleton
def is_pool_singleton_inner_puzzle(inner_puzzle: Program) -> bool:
inner_f = get_template_singleton_inner_puzzle(inner_puzzle)
return inner_f in [POOL_WAITING_ROOM_MOD, POOL_MEMBER_MOD]
def is_pool_waitingroom_inner_puzzle(inner_puzzle: Program) -> bool:
inner_f = get_template_singleton_inner_puzzle(inner_puzzle)
return inner_f in [POOL_WAITING_ROOM_MOD]
def is_pool_member_inner_puzzle(inner_puzzle: Program) -> bool:
inner_f = get_template_singleton_inner_puzzle(inner_puzzle)
return inner_f in [POOL_MEMBER_MOD]
# This spend will use the escape-type spend path for whichever state you are currently in
# If you are currently a waiting inner puzzle, then it will look at your target_state to determine the next
# inner puzzle hash to go to. The member inner puzzle is already committed to its next puzzle hash.
def create_travel_spend(
last_coin_spend: CoinSpend,
launcher_coin: Coin,
current: PoolState,
target: PoolState,
genesis_challenge: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> Tuple[CoinSpend, Program]:
inner_puzzle: Program = pool_state_to_inner_puzzle(
current,
launcher_coin.name(),
genesis_challenge,
delay_time,
delay_ph,
)
if is_pool_member_inner_puzzle(inner_puzzle):
# inner sol is key_value_list ()
# key_value_list is:
# "ps" -> poolstate as bytes
inner_sol: Program = Program.to([[("p", bytes(target))], 0])
elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
# inner sol is (spend_type, key_value_list, pool_reward_height)
destination_inner: Program = pool_state_to_inner_puzzle(
target, launcher_coin.name(), genesis_challenge, delay_time, delay_ph
)
log.debug(
f"create_travel_spend: waitingroom: target PoolState bytes:\n{bytes(target).hex()}\n"
f"{target}"
f"hash:{Program.to(bytes(target)).get_tree_hash()}"
)
# key_value_list is:
# "ps" -> poolstate as bytes
inner_sol = Program.to([1, [("p", bytes(target))], destination_inner.get_tree_hash()]) # current or target
else:
raise ValueError
current_singleton: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(last_coin_spend)
assert current_singleton is not None
if current_singleton.parent_coin_info == launcher_coin.name():
parent_info_list = Program.to([launcher_coin.parent_coin_info, launcher_coin.amount])
else:
p = Program.from_bytes(bytes(last_coin_spend.puzzle_reveal))
last_coin_spend_inner_puzzle: Optional[Program] = get_inner_puzzle_from_puzzle(p)
assert last_coin_spend_inner_puzzle is not None
parent_info_list = Program.to(
[
last_coin_spend.coin.parent_coin_info,
last_coin_spend_inner_puzzle.get_tree_hash(),
last_coin_spend.coin.amount,
]
)
full_solution: Program = Program.to([parent_info_list, current_singleton.amount, inner_sol])
full_puzzle: Program = create_full_puzzle(inner_puzzle, launcher_coin.name())
return (
CoinSpend(
current_singleton,
SerializedProgram.from_program(full_puzzle),
SerializedProgram.from_program(full_solution),
),
inner_puzzle,
)
def create_absorb_spend(
last_coin_spend: CoinSpend,
current_state: PoolState,
launcher_coin: Coin,
height: uint32,
genesis_challenge: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> List[CoinSpend]:
inner_puzzle: Program = pool_state_to_inner_puzzle(
current_state, launcher_coin.name(), genesis_challenge, delay_time, delay_ph
)
reward_amount: uint64 = calculate_pool_reward(height)
if is_pool_member_inner_puzzle(inner_puzzle):
# inner sol is (spend_type, pool_reward_amount, pool_reward_height, extra_data)
inner_sol: Program = Program.to([reward_amount, height])
elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
# inner sol is (spend_type, destination_puzhash, pool_reward_amount, pool_reward_height, extra_data)
inner_sol = Program.to([0, reward_amount, height])
else:
raise ValueError
# full sol = (parent_info, my_amount, inner_solution)
coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(last_coin_spend)
assert coin is not None
if coin.parent_coin_info == launcher_coin.name():
parent_info: Program = Program.to([launcher_coin.parent_coin_info, launcher_coin.amount])
else:
p = Program.from_bytes(bytes(last_coin_spend.puzzle_reveal))
last_coin_spend_inner_puzzle: Optional[Program] = get_inner_puzzle_from_puzzle(p)
assert last_coin_spend_inner_puzzle is not None
parent_info = Program.to(
[
last_coin_spend.coin.parent_coin_info,
last_coin_spend_inner_puzzle.get_tree_hash(),
last_coin_spend.coin.amount,
]
)
full_solution: SerializedProgram = SerializedProgram.from_program(
Program.to([parent_info, last_coin_spend.coin.amount, inner_sol])
)
full_puzzle: SerializedProgram = SerializedProgram.from_program(
create_full_puzzle(inner_puzzle, launcher_coin.name())
)
assert coin.puzzle_hash == full_puzzle.get_tree_hash()
reward_parent: bytes32 = pool_parent_id(height, genesis_challenge)
p2_singleton_puzzle: SerializedProgram = SerializedProgram.from_program(
create_p2_singleton_puzzle(SINGLETON_MOD_HASH, launcher_coin.name(), delay_time, delay_ph)
)
reward_coin: Coin = Coin(reward_parent, p2_singleton_puzzle.get_tree_hash(), reward_amount)
p2_singleton_solution: SerializedProgram = SerializedProgram.from_program(
Program.to([inner_puzzle.get_tree_hash(), reward_coin.name()])
)
assert p2_singleton_puzzle.get_tree_hash() == reward_coin.puzzle_hash
assert full_puzzle.get_tree_hash() == coin.puzzle_hash
assert get_inner_puzzle_from_puzzle(Program.from_bytes(bytes(full_puzzle))) is not None
coin_spends = [
CoinSpend(coin, full_puzzle, full_solution),
CoinSpend(reward_coin, p2_singleton_puzzle, p2_singleton_solution),
]
return coin_spends
def get_most_recent_singleton_coin_from_coin_spend(coin_sol: CoinSpend) -> Optional[Coin]:
additions: List[Coin] = coin_sol.additions()
for coin in additions:
if coin.amount % 2 == 1:
return coin
return None
def get_pubkey_from_member_inner_puzzle(inner_puzzle: Program) -> G1Element:
args = uncurry_pool_member_inner_puzzle(inner_puzzle)
if args is not None:
(
_inner_f,
_target_puzzle_hash,
_p2_singleton_hash,
pubkey_program,
_pool_reward_prefix,
_escape_puzzlehash,
) = args
else:
raise ValueError("Unable to extract pubkey")
pubkey = G1Element.from_bytes(pubkey_program.as_atom())
return pubkey
def uncurry_pool_member_inner_puzzle(inner_puzzle: Program): # -> Optional[Tuple[Program, Program, Program]]:
"""
Take a puzzle and return `None` if it's not a "pool member" inner puzzle, or
a triple of `mod_hash, relative_lock_height, pubkey` if it is.
"""
if not is_pool_member_inner_puzzle(inner_puzzle):
raise ValueError("Attempting to unpack a non-waitingroom inner puzzle")
r = inner_puzzle.uncurry()
if r is None:
raise ValueError("Failed to unpack inner puzzle")
inner_f, args = r
# p2_singleton_hash is the tree hash of the unique, curried P2_SINGLETON_MOD. See `create_p2_singleton_puzzle`
# escape_puzzlehash is of the unique, curried POOL_WAITING_ROOM_MOD. See `create_waiting_room_inner_puzzle`
target_puzzle_hash, p2_singleton_hash, owner_pubkey, pool_reward_prefix, escape_puzzlehash = tuple(args.as_iter())
return inner_f, target_puzzle_hash, p2_singleton_hash, owner_pubkey, pool_reward_prefix, escape_puzzlehash
def uncurry_pool_waitingroom_inner_puzzle(inner_puzzle: Program) -> Tuple[Program, Program, Program, Program]:
"""
Take a puzzle and return `None` if it's not a "pool member" inner puzzle, or
a triple of `mod_hash, relative_lock_height, pubkey` if it is.
"""
if not is_pool_waitingroom_inner_puzzle(inner_puzzle):
raise ValueError("Attempting to unpack a non-waitingroom inner puzzle")
r = inner_puzzle.uncurry()
if r is None:
raise ValueError("Failed to unpack inner puzzle")
inner_f, args = r
v = args.as_iter()
target_puzzle_hash, p2_singleton_hash, owner_pubkey, genesis_challenge, relative_lock_height = tuple(v)
return target_puzzle_hash, relative_lock_height, owner_pubkey, p2_singleton_hash
def get_inner_puzzle_from_puzzle(full_puzzle: Program) -> Optional[Program]:
p = Program.from_bytes(bytes(full_puzzle))
r = p.uncurry()
if r is None:
return None
_, args = r
_, inner_puzzle = list(args.as_iter())
if not is_pool_singleton_inner_puzzle(inner_puzzle):
return None
return inner_puzzle
def pool_state_from_extra_data(extra_data: Program) -> Optional[PoolState]:
state_bytes: Optional[bytes] = None
try:
for key, value in extra_data.as_python():
if key == b"p":
state_bytes = value
break
if state_bytes is None:
return None
return PoolState.from_bytes(state_bytes)
except TypeError as e:
log.error(f"Unexpected return from PoolWallet Smart Contract code {e}")
return None
def solution_to_pool_state(full_spend: CoinSpend) -> Optional[PoolState]:
full_solution_ser: SerializedProgram = full_spend.solution
full_solution: Program = Program.from_bytes(bytes(full_solution_ser))
if full_spend.coin.puzzle_hash == SINGLETON_LAUNCHER_HASH:
# Launcher spend
extra_data: Program = full_solution.rest().rest().first()
return pool_state_from_extra_data(extra_data)
# Not launcher spend
inner_solution: Program = full_solution.rest().rest().first()
# Spend which is not absorb, and is not the launcher
num_args = len(inner_solution.as_python())
assert num_args in (2, 3)
if num_args == 2:
# pool member
if inner_solution.rest().first().as_int() != 0:
return None
# This is referred to as p1 in the chialisp code
# spend_type is absorbing money if p1 is a cons box, spend_type is escape if p1 is an atom
# TODO: The comment above, and in the CLVM, seems wrong
extra_data = inner_solution.first()
if isinstance(extra_data.as_python(), bytes):
# Absorbing
return None
return pool_state_from_extra_data(extra_data)
else:
# pool waitingroom
if inner_solution.first().as_int() == 0:
return None
extra_data = inner_solution.rest().first()
return pool_state_from_extra_data(extra_data)
def pool_state_to_inner_puzzle(
pool_state: PoolState, launcher_id: bytes32, genesis_challenge: bytes32, delay_time: uint64, delay_ph: bytes32
) -> Program:
escaping_inner_puzzle: Program = create_waiting_room_inner_puzzle(
pool_state.target_puzzle_hash,
pool_state.relative_lock_height,
pool_state.owner_pubkey,
launcher_id,
genesis_challenge,
delay_time,
delay_ph,
)
if pool_state.state in [LEAVING_POOL, SELF_POOLING]:
return escaping_inner_puzzle
else:
return create_pooling_inner_puzzle(
pool_state.target_puzzle_hash,
escaping_inner_puzzle.get_tree_hash(),
pool_state.owner_pubkey,
launcher_id,
genesis_challenge,
delay_time,
delay_ph,
)
|
the-stack_0_26899
|
#!/usr/bin/env python
#
# License: BSD
# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
About
^^^^^
A few new items arriving in tantalising bowls of flying spaghetti here:
* A gui for manually triggering events
* A gui (same one) for visualising the led strip status
* A lower priority work branch triggered from the gui
* A first action client behaviour
* A kind of pre-emption, via behaviour tree decision logic
Tree
^^^^
.. graphviz:: dot/tutorial-five.dot
.. literalinclude:: ../py_trees_ros/tutorials/five.py
:language: python
:linenos:
:lines: 121-179
:caption: py_trees_ros/tutorials/five.py#create_root
**Guards**
.. graphviz:: dot/tutorial-five-guard.dot
The entire scan branch is protected by a :term:`guard` (note that the blackbox
in the above diagram is exactly that, a black box representing the lower
part of the tree). Once the scan event is received, this branch gets to work
until it either finishes, or is pre-empted by the higher priority low battery
branch.
**A Kind of Preemption**
.. graphviz:: dot/tutorial-five-preempt.dot
The second part of the tree enables a kind of pre-emption on the scanning action.
If a new request comes in, it will trigger the secondary scan event check, invalidating
whatever scanning action was currently running. This will clear the led command and
cancel the rotate action. On the next tick, the scan event check will fail (it was
consumed on the last tick) and the scanning will restart.
.. note::
This is not true pre-emption since it cancels the rotate action and restarts it. It is
however, exactly the pattern that is required in many instances. For true pre-emption
you could bundle both scan check and rotation action in the same behaviour or dynamically
insert action goals on the fly from the parent class.
**Handling Failure**
If the rotate action should fail, then the whole branch will also fail. Subsequently
dropping the robot back to its idle state. A failure event could be generated by
simply watching either the 'Scanning' parallel or the :meth:`~py_trees.trees.BehaviourTree.tip`
of the tree and reacting to its state change.
Behaviours
^^^^^^^^^^
Introducing the rotate action client behaviour!
.. literalinclude:: ../py_trees_ros/tutorials/five.py
:language: python
:linenos:
:lines: 158-163
:caption: py_trees_ros/tutorials/five.py#action_client_instantiation
.. literalinclude:: ../py_trees_ros/actions.py
:language: python
:linenos:
:lines: 28-121
:caption: py_trees_ros/actions.py#ActionClient
The :class:`~py_trees_ros.actions.ActionClient` is a generic template that can be used as
a drop-in for very simply monitoring the aborted/cancelled/running/success state of an
underlying controller with a pre-configured goal. See the :class:`api <py_trees_ros.actions.ActionClient>`
for details on when/how you might wish to extend this.
Running
^^^^^^^
.. code-block:: bash
$ roslaunch py_trees_ros tutorial_five.launch --screen
**Playing with the Spaghetti**
* Press the scan button to start a scan
* Press the scan button again while mid-scanning to pre-empt
* Set battery low in reconfigure whilst mid-scanning to priority switch
.. image:: images/tutorial-five-scanning.png
"""
##############################################################################
# Imports
##############################################################################
import functools
import py_trees
import py_trees_ros
import py_trees.console as console
import py_trees_msgs.msg as py_trees_msgs
import rospy
import sys
#from actionlib_msgs.msg import *
from phoenix_msgs.msg import *
import move_base_msgs.msg as move_base_msgs
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from math import radians, degrees
##############################################################################
# Behaviours
##############################################################################
def create_root():
# behaviours
root = py_trees.composites.Parallel("gizmo")
topics2bb = py_trees.composites.Sequence("Topics2BB")
scan2bb = py_trees_ros.subscribers.EventToBlackboard(
name="Scan2BB",
topic_name="/dashboard/scan",
variable_name="event_scan_button"
)
priorities = py_trees.composites.Selector("Priorities")
######################### scan
Showtime = py_trees.composites.Sequence(name="Showtime")
is_scan_requested = py_trees.blackboard.CheckBlackboardVariable(
name="Start show?",
variable_name='event_scan_button',
expected_value=True
)
Stage_center = py_trees.composites.Sequence(name="stage_center")
Stage_left = py_trees.composites.Sequence(name="stage_left")
Stage_right = py_trees.composites.Sequence(name="stage_right")
move_center = py_trees_ros.actions.ActionClient(
name="Move Center",
action_namespace="/move_base",
action_spec=move_base_msgs.MoveBaseAction,
action_goal= create_nav_goal(1.593, -0.350,1.363),
override_feedback_message_on_running="rotating"
)
move_left = py_trees_ros.actions.ActionClient(
name="Move left",
action_namespace="/move_base",
action_spec=move_base_msgs.MoveBaseAction,
action_goal= create_nav_goal(1.777, 0.406,1.042),
override_feedback_message_on_running="rotating"
)
move_right = py_trees_ros.actions.ActionClient(
name="Move right",
action_namespace="/move_base",
action_spec=move_base_msgs.MoveBaseAction,
action_goal= create_nav_goal(1.546, -0.411,1.581),
override_feedback_message_on_running="rotating"
)
home = py_trees_ros.actions.ActionClient(
name="home",
action_namespace="/move_base",
action_spec=move_base_msgs.MoveBaseAction,
action_goal= create_nav_goal(-0.045, 0.172,0.486),
override_feedback_message_on_running="rotating"
)
voice1 = py_trees_ros.actions.ActionClient(
name="Welcome",
action_namespace="/voice_action",
action_spec=VoiceAction,
action_goal=voice_msg("Welcome to SHOW time. my name is Robbie, This is where I get to demonstrate my capabilities"),
override_feedback_message_on_running="rotating"
)
voice2 = py_trees_ros.actions.ActionClient(
name="left_talk",
action_namespace="/voice_action",
action_spec=VoiceAction,
action_goal=voice_msg("I can move to my left"),
override_feedback_message_on_running="rotating"
)
voice3 = py_trees_ros.actions.ActionClient(
name="Right talk",
action_namespace="/voice_action",
action_spec=VoiceAction,
action_goal=voice_msg("I can move to my right"),
override_feedback_message_on_running="rotating"
)
voice4 = py_trees_ros.actions.ActionClient(
name="return center",
action_namespace="/voice_action",
action_spec=VoiceAction,
action_goal=voice_msg("And back to the center"),
override_feedback_message_on_running="rotating"
)
voice5 = py_trees_ros.actions.ActionClient(
name="end talk",
action_namespace="/voice_action",
action_spec=VoiceAction,
action_goal=voice_msg("Thank you very much for yourtime. Goodbye"),
override_feedback_message_on_running="rotating"
)
idle = py_trees.behaviours.Running(name="Idle")
################### tree
root.add_children([topics2bb, priorities])
topics2bb.add_children([scan2bb])
priorities.add_children([Showtime, idle])
Showtime.add_children([is_scan_requested, Stage_center, Stage_left, Stage_right])
Stage_center.add_children([move_center, voice1])
Stage_left.add_children([voice2, move_left, voice4, move_center])
Stage_right.add_children([voice3, move_right, voice4, move_center, voice5, home])
return root
def voice_msg(text):
g = VoiceGoal()
g.text = text
return g
def create_nav_goal(x, y, yaw):
"""Create a MoveBaseGoal with x, y position and yaw rotation (in degrees).
Returns a MoveBaseGoal"""
mb_goal = MoveBaseGoal()
mb_goal.target_pose.header.frame_id = 'map' # Note: the frame_id must be map
mb_goal.target_pose.pose.position.x = x
mb_goal.target_pose.pose.position.y = y
mb_goal.target_pose.pose.position.z = 0.0 # z must be 0.0 (no height in the map)
# Orientation of the robot is expressed in the yaw value of euler angles
angle = radians(yaw) # angles are expressed in radians
quat = quaternion_from_euler(0.0, 0.0, angle) # roll, pitch, yaw
mb_goal.target_pose.pose.orientation = Quaternion(*quat.tolist())
return mb_goal
def shutdown(behaviour_tree):
behaviour_tree.interrupt()
##############################################################################
# Main
##############################################################################
def main():
"""
Entry point for the demo script.
"""
rospy.init_node("tree")
root = create_root()
behaviour_tree = py_trees_ros.trees.BehaviourTree(root)
rospy.on_shutdown(functools.partial(shutdown, behaviour_tree))
if not behaviour_tree.setup(timeout=15):
console.logerror("failed to setup the tree, aborting.")
sys.exit(1)
behaviour_tree.tick_tock(500)
if __name__ == '__main__':
main()
|
the-stack_0_26900
|
# -*- coding: utf-8 -*-
# Author: TDC Team
# License: MIT
import numpy as np
from typing import List
try:
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from rdkit import rdBase
rdBase.DisableLog('rdApp.error')
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem import MACCSkeys
except:
raise ImportError("Please install rdkit by 'conda install -c conda-forge rdkit'! ")
from ...utils import print_sys
from ..oracle.oracle import smiles_to_rdkit_mol, smiles_2_fingerprint_ECFP4, smiles_2_fingerprint_FCFP4, smiles_2_fingerprint_AP, smiles_2_fingerprint_ECFP6
from ._smiles2pubchem import smiles2pubchem
def canonicalize(smiles):
mol = Chem.MolFromSmiles(smiles)
if mol is not None:
return Chem.MolToSmiles(mol, isomericSmiles=True)
else:
return None
def smiles2morgan(s, radius = 2, nBits = 1024):
"""Convert smiles into Morgan Fingerprint.
Args:
smiles: str
radius: int (default: 2)
nBits: int (default: 1024)
Returns:
fp: numpy.array
"""
try:
s = canonicalize(s)
mol = Chem.MolFromSmiles(s)
features_vec = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(features_vec, features)
except:
print_sys('rdkit not found this smiles for morgan: ' + s + ' convert to all 0 features')
features = np.zeros((nBits, ))
return features
def smiles2rdkit2d(s):
"""Convert smiles into 200-dim Normalized RDKit 2D vector.
Args:
smiles: str
Returns:
fp: numpy.array
"""
s = canonicalize(s)
try:
from descriptastorus.descriptors import rdDescriptors, rdNormalizedDescriptors
except:
raise ImportError("Please install pip install git+https://github.com/bp-kelley/descriptastorus and pip install pandas-flavor")
try:
generator = rdNormalizedDescriptors.RDKit2DNormalized()
features = np.array(generator.process(s)[1:])
NaNs = np.isnan(features)
features[NaNs] = 0
except:
print_sys('descriptastorus not found this smiles: ' + s + ' convert to all 0 features')
features = np.zeros((200, ))
return np.array(features)
def smiles2daylight(s):
"""Convert smiles into 2048-dim Daylight feature.
Args:
smiles: str
Returns:
fp: numpy.array
"""
try:
s = canonicalize(s)
NumFinger = 2048
mol = Chem.MolFromSmiles(s)
bv = FingerprintMols.FingerprintMol(mol)
temp = tuple(bv.GetOnBits())
features = np.zeros((NumFinger, ))
features[np.array(temp)] = 1
except:
print_sys('rdkit not found this smiles: ' + s + ' convert to all 0 features')
features = np.zeros((2048, ))
return np.array(features)
def smiles2maccs(s):
"""Convert smiles into maccs feature.
Args:
smiles: str
Returns:
fp: numpy.array
"""
s = canonicalize(s)
mol = Chem.MolFromSmiles(s)
fp = MACCSkeys.GenMACCSKeys(mol)
arr = np.zeros((0,), dtype=np.float64)
DataStructs.ConvertToNumpyArray(fp,arr)
return arr
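# Note (RDKit behaviour): MACCSkeys.GenMACCSKeys returns a 167-bit key vector
# (bit 0 is unused), so the array produced above has length 167.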
'''
ECFP naming vs. Morgan radius used below:
  ECFP2 ---- radius 1
  ECFP4 ---- radius 2
  ECFP6 ---- radius 3
Reference: https://github.com/rdkit/benchmarking_platform/blob/master/scoring/fingerprint_lib.py
'''
def smiles2ECFP2(smiles):
"""Convert smiles into ECFP2 Morgan Fingerprint.
Args:
smiles: str
Returns:
        fp: numpy.array
"""
nbits = 2048
smiles = canonicalize(smiles)
molecule = smiles_to_rdkit_mol(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(molecule, 1, nBits=nbits)
arr = np.zeros((0,), dtype=np.float64)
DataStructs.ConvertToNumpyArray(fp,arr)
return arr
def smiles2ECFP4(smiles):
"""Convert smiles into ECFP4 Morgan Fingerprint.
Args:
smiles: str
Returns:
        fp: numpy.array
"""
nbits = 2048
smiles = canonicalize(smiles)
molecule = smiles_to_rdkit_mol(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=nbits)
arr = np.zeros((0,), dtype=np.float64)
DataStructs.ConvertToNumpyArray(fp,arr)
return arr
def smiles2ECFP6(smiles):
"""Convert smiles into ECFP6 Morgan Fingerprint.
Args:
smiles: str, a SMILES string
Returns:
        fp: numpy.array
"""
nbits = 2048
smiles = canonicalize(smiles)
molecule = smiles_to_rdkit_mol(smiles)
    fp = AllChem.GetMorganFingerprintAsBitVect(molecule, 3, nBits=nbits)  # radius 3 corresponds to ECFP6
arr = np.zeros((0,), dtype=np.float64)
DataStructs.ConvertToNumpyArray(fp,arr)
return arr
# def smiles2smart(smiles):
class MoleculeFingerprint:
'''
Example:
MolFP = MoleculeFingerprint(fp = 'ECFP6')
out = MolFp('Clc1ccccc1C2C(=C(/N/C(=C2/C(=O)OCC)COCCN)C)\C(=O)OC')
# np.array([1, 0, 1, .....])
out = MolFp(['Clc1ccccc1C2C(=C(/N/C(=C2/C(=O)OCC)COCCN)C)\C(=O)OC',
'CCCOc1cc2ncnc(Nc3ccc4ncsc4c3)c2cc1S(=O)(=O)C(C)(C)C'])
# np.array([[1, 0, 1, .....],
[0, 0, 1, .....]])
Supporting FPs:
Basic_Descriptors(atoms, chirality, ....), ECFP2, ECFP4, ECFP6, MACCS, Daylight-type, RDKit2D, Morgan, PubChem
'''
def __init__(self, fp = 'ECFP4'):
fp2func = {'ECFP2': smiles2ECFP2,
'ECFP4': smiles2ECFP4,
'ECFP6': smiles2ECFP6,
'MACCS': smiles2maccs,
'Daylight': smiles2daylight,
'RDKit2D': smiles2rdkit2d,
'Morgan': smiles2morgan,
'PubChem': smiles2pubchem}
try:
assert fp in fp2func
except:
raise Exception("The fingerprint you specify are not supported. \
It can only among 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem'")
self.fp = fp
self.func = fp2func[fp]
def __call__(self, x):
if type(x)==str:
return self.func(x)
elif type(x)==list:
lst = list(map(self.func, x))
arr = np.vstack(lst)
return arr
def smiles2selfies(smiles):
"""Convert smiles into selfies.
Args:
smiles: str, a SMILES string
Returns:
selfies: str, a SELFIES string.
"""
smiles = canonicalize(smiles)
return sf.encoder(smiles)
def selfies2smiles(selfies):
"""Convert selfies into smiles.
Args:
selfies: str, a SELFIES string.
Returns:
smiles: str, a SMILES string
"""
return canonicalize(sf.decoder(selfies))
def smiles2mol(smiles):
"""Convert SMILES string into rdkit.Chem.rdchem.Mol.
Args:
smiles: str, a SMILES string.
Returns:
mol: rdkit.Chem.rdchem.Mol
"""
smiles = canonicalize(smiles)
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return None
Chem.Kekulize(mol)
return mol
def bondtype2idx(bond_type):
if bond_type == Chem.rdchem.BondType.SINGLE:
return 1
elif bond_type == Chem.rdchem.BondType.DOUBLE:
return 2
elif bond_type == Chem.rdchem.BondType.TRIPLE:
return 3
elif bond_type == Chem.rdchem.BondType.AROMATIC:
return 4
def smiles2graph2D(smiles):
"""convert SMILES string into two-dimensional molecular graph feature
Args:
smiles, str, a SMILES string
Returns:
idx2atom: dict, map from index to atom's symbol, e.g., {0:'C', 1:'N', ...}
adj_matrix: np.array
"""
smiles = canonicalize(smiles)
mol = smiles2mol(smiles)
n_atoms = mol.GetNumAtoms()
idx2atom = {atom.GetIdx():atom.GetSymbol() for atom in mol.GetAtoms()}
adj_matrix = np.zeros((n_atoms, n_atoms), dtype = int)
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
idx1 = a1.GetIdx()
idx2 = a2.GetIdx()
bond_type = bond.GetBondType()
bond_idx = bondtype2idx(bond_type)
adj_matrix[idx1,idx2] = bond_idx
adj_matrix[idx2,idx1] = bond_idx
return idx2atom, adj_matrix
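# Illustrative example (a hedged sketch of the expected output for ethanol 'CCO'):
#   idx2atom, adj = smiles2graph2D('CCO')
#   idx2atom -> {0: 'C', 1: 'C', 2: 'O'}
#   adj      -> [[0, 1, 0],
#                [1, 0, 1],
#                [0, 1, 0]]   # entries are bond-type indices (1 = single bond)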
def get_mol(smiles):
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return None
Chem.Kekulize(mol)
return mol
############### PyG begin ###############
ELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']
ATOM_FDIM = len(ELEM_LIST) + 6 + 5 + 4 + 1
BOND_FDIM = 5 + 6
MAX_NB = 6
# https://github.com/kexinhuang12345/DeepPurpose/blob/master/DeepPurpose/chemutils.py
def onek_encoding_unk(x, allowable_set):
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
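# e.g. onek_encoding_unk('N', ['C', 'N', 'O']) -> [False, True, False];
# symbols outside the allowable set fall back to its last entry ('unknown' in ELEM_LIST).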
def get_atom_features(atom):
return torch.Tensor(onek_encoding_unk(atom.GetSymbol(), ELEM_LIST)
+ onek_encoding_unk(atom.GetDegree(), [0,1,2,3,4,5])
+ onek_encoding_unk(atom.GetFormalCharge(), [-1,-2,1,2,0])
+ onek_encoding_unk(int(atom.GetChiralTag()), [0,1,2,3])
+ [atom.GetIsAromatic()])
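# The feature vector above has ATOM_FDIM = 23 + 6 + 5 + 4 + 1 = 39 entries:
# element one-hot (ELEM_LIST), degree, formal charge, chirality tag, and aromatic flag.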
def smiles2PyG(smiles):
"""convert SMILES string into torch_geometric.data.Data
Args:
smiles, str, a SMILES string
Returns:
data, torch_geometric.data.Data
"""
smiles = canonicalize(smiles)
mol = Chem.MolFromSmiles(smiles)
n_atoms = mol.GetNumAtoms()
atom_features = [get_atom_features(atom) for atom in mol.GetAtoms()]
atom_features = torch.stack(atom_features)
y = [atom.GetSymbol() for atom in mol.GetAtoms()]
y = list(map(lambda x: ELEM_LIST.index(x) if x in ELEM_LIST else len(ELEM_LIST)-1 , y))
y = torch.LongTensor(y)
bond_features = []
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
idx1 = a1.GetIdx()
idx2 = a2.GetIdx()
bond_features.extend([[idx1, idx2], [idx2, idx1]])
bond_features = torch.LongTensor(bond_features)
data = Data(x=atom_features, edge_index=bond_features.T)
return data
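# Illustrative example (hedged sketch, ethanol 'CCO'): the returned Data object has
#   data.x          -> shape (3, 39)   # 3 atoms x ATOM_FDIM features
#   data.edge_index -> shape (2, 4)    # each of the 2 bonds stored in both directions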
def molfile2PyG(molfile):
smiles = molfile2smiles(molfile)
smiles = canonicalize(smiles)
return smiles2PyG(smiles)
############### PyG end ###############
############### DGL begin ###############
def smiles2DGL(smiles):
"""convert SMILES string into dgl.DGLGraph
Args:
smiles, str, a SMILES string
Returns:
g: dgl.DGLGraph()
"""
smiles = canonicalize(smiles)
mol = Chem.MolFromSmiles(smiles)
n_atoms = mol.GetNumAtoms()
bond_features = []
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
idx1 = a1.GetIdx()
idx2 = a2.GetIdx()
bond_features.extend([[idx1, idx2], [idx2, idx1]])
src, dst = tuple(zip(*bond_features))
g = dgl.DGLGraph()
g.add_nodes(n_atoms)
g.add_edges(src, dst)
return g
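# Illustrative example (hedged sketch, ethanol 'CCO'): the returned DGLGraph has
# 3 nodes and 4 directed edges (each of the 2 bonds added in both directions).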
############### DGL end ###############
from ._xyz2mol import xyzfile2mol
def mol2smiles(mol):
smiles = Chem.MolToSmiles(mol)
smiles = canonicalize(smiles)
return smiles
def xyzfile2smiles(xyzfile):
"""convert xyzfile into smiles string.
Args:
xyzfile: str, file
Returns:
smiles: str, a SMILES string
"""
mol, _ = xyzfile2mol(xyzfile)
smiles = mol2smiles(mol)
smiles = canonicalize(smiles)
return smiles
def xyzfile2selfies(xyzfile):
"""convert xyzfile into SELFIES string.
Args:
xyzfile: str, file
Returns:
selfies: str, a SELFIES string.
"""
smiles = xyzfile2smiles(xyzfile)
smiles = canonicalize(smiles)
selfies = smiles2selfies(smiles)
return selfies
def distance3d(coordinate_1, coordinate_2):
return np.sqrt(sum([(c1-c2)**2 for c1,c2 in zip(coordinate_1, coordinate_2)]))
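# Quick check: distance3d((0, 0, 0), (3, 4, 0)) -> 5.0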
def upper_atom(atomsymbol):
return atomsymbol[0].upper() + atomsymbol[1:]
def xyzfile2graph3d(xyzfile):
    atoms, charge, xyz_coordinates = read_xyz_file(xyzfile)
num_atoms = len(atoms)
distance_adj_matrix = np.zeros((num_atoms, num_atoms))
for i in range(num_atoms):
for j in range(i+1, num_atoms):
distance = distance3d(xyz_coordinates[i], xyz_coordinates[j])
distance_adj_matrix[i,j] = distance_adj_matrix[j,i] = distance
idx2atom = {idx:upper_atom(str_atom(atom)) for idx,atom in enumerate(atoms)}
mol, BO = xyzfile2mol(xyzfile)
return idx2atom, distance_adj_matrix, BO
############## end xyz2mol ################
def sdffile2smiles_lst(sdffile):
"""convert SDF file into a list of SMILES string.
Args:
sdffile: str, file
Returns:
smiles_lst: a list of SMILES strings.
"""
from rdkit.Chem.PandasTools import LoadSDF
df = LoadSDF(sdffile, smilesName='SMILES')
smiles_lst = df['SMILES'].to_list()
return smiles_lst
def sdffile2mol_conformer(sdffile):
"""convert sdffile into a list of molecule conformers.
Args:
sdffile: str, file
Returns:
smiles_lst: a list of molecule conformers.
"""
from rdkit.Chem.PandasTools import LoadSDF
df = LoadSDF(sdffile, smilesName='SMILES')
mol_lst = df['ROMol'].tolist()
conformer_lst = []
for mol in mol_lst:
conformer = mol.GetConformer(id=0)
conformer_lst.append(conformer)
mol_conformer_lst = list(zip(mol_lst, conformer_lst))
return mol_conformer_lst
def mol_conformer2graph3d(mol_conformer_lst):
"""convert list of (molecule, conformer) into a list of 3D graph.
Args:
mol_conformer_lst: list of tuple (molecule, conformer)
Returns:
graph3d_lst: a list of 3D graph.
each graph has (i) idx2atom (dict); (ii) distance_adj_matrix (np.array); (iii) bondtype_adj_matrix (np.array)
"""
graph3d_lst = []
bond2num = {'SINGLE': 1, 'DOUBLE':2, 'TRIPLE':3, "AROMATIC":4}
for mol, conformer in mol_conformer_lst:
atom_num = mol.GetNumAtoms()
distance_adj_matrix = np.zeros((atom_num, atom_num))
bondtype_adj_matrix = np.zeros((atom_num, atom_num), dtype = int)
idx2atom = {i:v.GetSymbol() for i,v in enumerate(mol.GetAtoms())}
positions = []
for i in range(atom_num):
pos = conformer.GetAtomPosition(i)
coordinate = np.array([pos.x, pos.y, pos.z]).reshape(1,3)
positions.append(coordinate)
positions = np.concatenate(positions, 0)
for i in range(atom_num):
for j in range(i+1, atom_num):
distance_adj_matrix[i,j] = distance_adj_matrix[j,i] = distance3d(positions[i], positions[j])
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom().GetIdx()
a2 = bond.GetEndAtom().GetIdx()
bt = bond.GetBondType()
            bondtype_adj_matrix[a1,a2] = bond2num[str(bt)]
            bondtype_adj_matrix[a2,a1] = bond2num[str(bt)]
graph3d_lst.append((idx2atom, distance_adj_matrix, bondtype_adj_matrix))
return graph3d_lst
def sdffile2graph3d_lst(sdffile):
"""convert SDF file into a list of 3D graph.
Args:
sdffile: SDF file
Returns:
graph3d_lst: a list of 3D graph.
each graph has (i) idx2atom (dict); (ii) distance_adj_matrix (np.array); (iii) bondtype_adj_matrix (np.array)
"""
mol_conformer_lst = sdffile2mol_conformer(sdffile)
graph3d_lst = mol_conformer2graph3d(mol_conformer_lst)
return graph3d_lst
def sdffile2selfies_lst(sdf):
"""convert sdffile into a list of SELFIES strings.
Args:
sdffile: str, file
Returns:
selfies_lst: a list of SELFIES strings.
"""
smiles_lst = sdffile2smiles_lst(sdf)
selfies_lst = list(map(smiles2selfies, smiles_lst))
return selfies_lst
def smiles_lst2coulomb(smiles_lst):
"""convert a list of SMILES strings into coulomb format.
Args:
smiles_lst: a list of SELFIES strings.
Returns:
features: np.array
"""
molecules = [Molecule(smiles, 'smiles') for smiles in smiles_lst]
for mol in molecules:
mol.to_xyz(optimizer='UFF')
cm = CoulombMatrix(cm_type='UM', n_jobs=-1)
features = cm.represent(molecules)
features = features.to_numpy()
return features
## (nmol, max_atom_n**2),
## where max_atom_n is maximal number of atom in the smiles_lst
## features[i].reshape(max_atom_n, max_atom_n)[:3,:3] -> 3*3 Coulomb matrix
def sdffile2coulomb(sdf):
"""convert sdffile into a list of coulomb feature.
Args:
sdffile: str, file
Returns:
coulomb feature: np.array
"""
smiles_lst = sdffile2smiles_lst(sdf)
return smiles_lst2coulomb(smiles_lst)
def xyzfile2coulomb(xyzfile):
smiles = xyzfile2smiles(xyzfile)
smiles = canonicalize(smiles)
return smiles_lst2coulomb([smiles])
#2D_format = ['SMILES', 'SELFIES', 'Graph2D', 'PyG', 'DGL', 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem']
#3D_format = ['Graph3D', 'Coulumb']
## XXX2smiles
def molfile2smiles(molfile):
"""convert molfile into SMILES string
Args:
molfile: str, a file.
Returns:
smiles: str, SMILES strings
"""
mol = Chem.MolFromMolFile(molfile)
smiles = Chem.MolToSmiles(mol)
smiles = canonicalize(smiles)
return smiles
def mol2file2smiles(molfile):
"""convert mol2file into SMILES string
Args:
mol2file: str, a file.
Returns:
smiles: str, SMILES strings
"""
mol = Chem.MolFromMol2File(molfile)
smiles = Chem.MolToSmiles(mol)
smiles = canonicalize(smiles)
return smiles
## smiles2xxx
convert_dict = {
'SMILES': ['SELFIES', 'Graph2D', 'PyG', 'DGL', 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem'],
'SELFIES': ['SMILES', 'Graph2D', 'PyG', 'DGL', 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem'],
'mol': ['SMILES', 'SELFIES', 'Graph2D', 'PyG', 'DGL', 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem'],
'mol2': ['SMILES', 'SELFIES', 'Graph2D', 'PyG', 'DGL', 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem'],
'SDF': ['SMILES', 'SELFIES', 'Graph3D', 'Coulumb'],
'XYZ': ['SMILES', 'SELFIES', 'Graph3D', 'Coulumb'],
}
fingerprints_list = ['ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem']
twoD_format = ['SMILES', 'SELFIES', 'mol', 'mol2', ]
threeD_format = ['SDF', 'XYZ', ]
class MolConvert:
"""MolConvert: convert the molecule from src formet to dst format.
Example:
convert = MolConvert(src = ‘SMILES’, dst = ‘Graph2D’)
g = convert(‘Clc1ccccc1C2C(=C(/N/C(=C2/C(=O)OCC)COCCN)C)\C(=O)OC’)
# g: graph with edge, node features
g = convert(['Clc1ccccc1C2C(=C(/N/C(=C2/C(=O)OCC)COCCN)C)\C(=O)OC',
'CCCOc1cc2ncnc(Nc3ccc4ncsc4c3)c2cc1S(=O)(=O)C(C)(C)C'])
# g: a list of graphs with edge, node features
if src is 2D, dst can be only 2D output
if src is 3D, dst can be both 2D and 3D outputs
src: 2D - [SMILES, SELFIES]
3D - [SDF file, XYZ file]
dst: 2D - [2D Graph (+ PyG, DGL format), Canonical SMILES, SELFIES, Fingerprints]
3D - [3D graphs (adj matrix entry is (distance, bond type)), Coulumb Matrix]
"""
def __init__(self, src = 'SMILES', dst = 'Graph2D', radius = 2, nBits = 1024):
self._src = src
self._dst = dst
self._radius = radius
self._nbits = nBits
self.convert_dict = convert_dict
if 'SELFIES' == src or 'SELFIES' == dst:
try:
import selfies as sf
global sf
except:
raise Exception("Please install selfies via 'pip install selfies'")
if 'Coulumb' == dst:
try:
from chemml.chem import CoulombMatrix, Molecule
global CoulombMatrix, Molecule
except:
raise Exception("Please install chemml via 'pip install pybel' and 'pip install chemml'. ")
if 'PyG' == dst:
try:
import torch
from torch_geometric.data import Data
global torch
global Data
except:
raise Exception("Please install PyTorch Geometric via 'https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html'.")
if 'DGL' == dst:
try:
import dgl
global dgl
except:
raise Exception("Please install DGL via 'pip install dgl'.")
try:
assert src in self.convert_dict
except:
raise Exception("src format is not supported")
try:
assert dst in self.convert_dict[src]
except:
raise Exception('It is not supported to convert src to dst.')
if src in twoD_format:
### 1. src -> SMILES
if src == "SMILES":
f1 = canonicalize
elif src == "SELFIES":
f1 = selfies2smiles
elif src == "mol":
f1 = molfile2smiles
elif src == "mol2":
f1 = mol2file2smiles
### 2. SMILES -> all
# 'SMILES', 'SELFIES', 'Graph2D', 'PyG', 'DGL', 'ECFP2', 'ECFP4', 'ECFP6', 'MACCS', 'Daylight', 'RDKit2D', 'Morgan', 'PubChem'
if dst == 'SMILES':
f2 = canonicalize
elif dst == 'SELFIES':
f2 = smiles2selfies
elif dst == "Graph2D":
f2 = smiles2graph2D
elif dst == "PyG":
f2 = smiles2PyG
elif dst == "DGL":
f2 = smiles2DGL
elif dst == "ECFP2":
f2 = smiles2ECFP2
elif dst == "ECFP4":
f2 = smiles2ECFP4
elif dst == "MACCS":
f2 = smiles2maccs
elif dst == "Daylight":
f2 = smiles2daylight
elif dst == "RDKit2D":
f2 = smiles2rdkit2d
elif dst == "Morgan":
f2 = smiles2morgan
elif dst == 'PubChem':
f2 = smiles2pubchem
self.func = lambda x:f2(f1(x))
elif src in threeD_format:
pass
### load from xyz file, input is a filename (str), only contain one smiles
if src == 'XYZ' and dst == 'SMILES':
self.func = xyzfile2smiles
elif src == 'XYZ' and dst == 'SELFIES':
self.func = xyzfile2selfies
elif src == 'XYZ' and dst == 'Graph3D':
self.func = xyzfile2graph3d
elif src == 'XYZ' and dst == 'Coulumb':
self.func = xyzfile2coulomb
### SDF file
elif src == 'SDF' and dst == 'Graph3D':
self.func = sdffile2graph3d_lst
elif src == 'SDF' and dst == 'SMILES':
self.func = sdffile2smiles_lst
elif src == 'SDF' and dst == 'SELFIES':
self.func = sdffile2selfies_lst
elif src == 'SDF' and dst == 'Coulumb':
self.func = sdffile2coulomb
def __call__(self, x):
if type(x) == np.ndarray:
x = x.tolist()
if type(x) == str:
if self.func != smiles2morgan:
return self.func(x)
else:
return self.func(x, radius = self._radius, nBits = self._nbits)
elif type(x) == list:
if self.func != smiles2morgan:
out = list(map(self.func, x))
else:
lst = []
for x0 in x:
lst.append(self.func(x0, radius = self._radius, nBits = self._nbits))
out = lst
if self._dst in fingerprints_list:
out = np.array(out)
return out
@staticmethod
def eligible_format(src = None):
'''
        given a src format, return all the dst formats it can be converted to
        Example
        MolConvert.eligible_format('SMILES')
        ## ['SELFIES', 'Graph2D', 'PyG', ...]
'''
if src is not None:
try:
assert src in convert_dict
except:
raise Exception("src format is not supported")
return convert_dict[src]
else:
return convert_dict
|
the-stack_0_26902
|
#!/usr/bin/env python
"""
Mathematical tools for audio signal processing.
"""
#
# Authors: Juan Sebastian ULLOA <[email protected]>
# Sylvain HAUPERT <[email protected]>
#
# License: New BSD License
# =============================================================================
# Load the modules
# =============================================================================
# Import external modules
import matplotlib.pyplot as plt
import numpy as np
from numpy import mean, median, var
from scipy.ndimage.filters import uniform_filter1d # for fast running mean
from scipy.signal import periodogram, welch
import pandas as pd
# min value
import sys
_MIN_ = sys.float_info.min
# Import internal modules
from maad.util import linear_scale
#%%
# =============================================================================
# public functions
# =============================================================================
def running_mean(x, N, mode="nearest"):
"""
Compute fast running mean for a window size N.
Parameters
----------
x : 1d ndarray of scalars
Vector
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional,
The `mode` parameter determines how the input array is extended
when the filter overlaps a border. Default is 'nearest'. Behavior
for each valid value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.
N : int
length of window to compute the mean
Returns
-------
x_mean : 1d ndarray of scalars
Vector with the same dimensions than the original variable x
Examples
--------
>>> maad.util.running_mean([2, 8, 0, 4, 1, 9, 9, 0], N=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
    x_mean = uniform_filter1d(x, size=N, mode=mode)
return x_mean
#%%
def get_unimode (X, mode ='median', axis=1, N=7, N_bins=100, verbose=False):
"""
Get the statistical mode or modal value which is the most common number in the
dataset.
Parameters
----------
X : 1d or 2d ndarray of scalars
Vector or matrix
mode : str, optional, default is 'median'
Select the mode to remove the noise
Possible values for mode are :
- 'ale' : Adaptative Level Equalization algorithm [Lamel & al. 1981]
- 'median' : subtract the median value
- 'mean' : subtract the mean value (DC)
axis : integer, default is 1
if matrix, estimate the mode for each row (axis=0) or each column (axis=1)
N : int (only for mode = "ale")
length of window to compute the running mean of the histogram
N_bins : int (only for mode = "ale")
number of bins to compute the histogram
verbose : boolean, optional, default is False
print messages into the consol or terminal if verbose is True
Returns
-------
unimode_value : float
The most common number in the dataset
Notes
-----
ale : Adaptative Level Equalization algorithm from Lamel et al., 1981 :
L.F. Lamel, L.R. Rabiner, A.E. Rosenberg, J.G. Wilpon
An improved endpoint detector for isolated word recognition
IEEE Trans. ASSP, ASSP-29 (1981), pp. 777-785
`DOI: 10.1109/TASSP.1981.1163642 <https://doi.org/10.1109/TASSP.1981.1163642>`_
Examples
--------
This function is interesting to obtain the background noise (BGN) profile (e.g. frequency bin
by frequency bin) of a spectrogram
>>> w, fs = maad.sound.load('../data/cold_forest_daylight.wav')
>>> Sxx_power,tn,fn,_ = maad.sound.spectrogram(w,fs,window='hanning',noverlap=512, nFFT=1024)
>>> Sxx_dB = maad.util.power2dB(Sxx_power)
>>> BGN_med = maad.util.get_unimode (Sxx_dB, mode='median', axis=1)
>>> import matplotlib.pyplot as plt
>>> plt.plot(fn,maad.util.mean_dB(Sxx_dB,axis=1))
>>> plt.plot(fn,BGN_med)
Extract the background noise from mean
>>> BGN_mean = maad.util.get_unimode (Sxx_dB, mode='mean', axis=1)
>>> plt.plot(fn,BGN_mean)
Extract the background noise from ale (i.e. unimode)
>>> BGN_ale = maad.util.get_unimode (Sxx_dB, mode='ale', N=7, N_bins=100, axis=1)
>>> plt.plot(fn,BGN_ale)
"""
if X.ndim ==2:
if axis == 0:
X = X.transpose()
axis = 1
elif X.ndim ==1:
axis = 0
if mode=='ale':
if X.ndim ==2:
unimode_value = []
for i, x in enumerate(X):
# Min and Max of the envelope (without taking into account nan)
x_min = np.nanmin(x)
x_max = np.nanmax(x)
                # Compute an N_bins-bin histogram ranging between Min and Max values
hist, bin_edges = np.histogram(x, bins=N_bins, range=(x_min, x_max))
# smooth the histogram by running mean
hist_smooth = running_mean(hist, N, mode="nearest")
                # find the location of the mode (most frequent value) of the smoothed histogram
imax = np.argmax(hist_smooth)
unimode_value.append(bin_edges[imax])
# transpose the vector
unimode_value = np.asarray(unimode_value)
unimode_value = unimode_value.transpose()
else:
x = X
# Min and Max of the envelope (without taking into account nan)
x_min = np.nanmin(x)
x_max = np.nanmax(x)
            # Compute an N_bins-bin histogram ranging between Min and Max values
hist, bin_edges = np.histogram(x, bins=N_bins, range=(x_min, x_max))
# smooth the histogram by running mean
hist_smooth = running_mean(hist, N, mode="nearest")
            # find the location of the mode (most frequent value) of the smoothed histogram
imax = np.argmax(hist_smooth)
            # assuming an additive noise model : the background level is the mode of
            # the histogram, i.e. unimode_value = bin_edges[np.argmax(hist_smooth)]
unimode_value = bin_edges[imax]
elif mode=='median':
unimode_value = median(X, axis=axis)
elif mode=='mean':
unimode_value = mean(X, axis=axis)
return unimode_value
#%%
def rms(s):
"""
Compute the root-mean-square (RMS) level of an input signal.
    RMS is defined as the square root of the arithmetic mean of the square of a set of numbers [1]. The RMS is used to estimate the mean amplitude level of an audio signal or any alternative time series.
Parameters
----------
s : 1D array
Input signal to process
Returns
-------
rms: float
Root mean square of input signal
References
----------
.. [1] 'Root mean square' (2010). Wikipedia. Available at https://en.wikipedia.org/wiki/Root_mean_square
Examples
--------
>>> from maad import sound, util
>>> s, fs = sound.load('../data/spinetail.wav')
>>> rms_value = util.rms(s)
"""
return np.sqrt(np.mean(s**2))
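# Sanity check (independent of the example file above): the RMS of a unit-amplitude
# sine wave sampled over a whole number of periods is 1/sqrt(2):
#   rms(np.sin(2 * np.pi * np.arange(0, 1, 1e-4)))  ->  ~0.7071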
#%%
def skewness (x, axis=None):
"""
Compute the skewness (asymetry) of an audio signal.
Parameters
----------
x : ndarray of floats
1d signal or 2d matrix
axis : integer, optional, default is None
select the axis to compute the kurtosis
The default is to compute the mean of the flattened array.
Returns
-------
sk : float or ndarray of floats
skewness of x
if x is a 1d vector => single value
if x is a 2d matrix => array of values corresponding to the number of
points in the other axis
Examples
--------
>>> from maad import sound, util
>>> s, fs = sound.load('../data/spinetail.wav')
>>> util.skewness(s)
-0.006547980427883208
"""
if isinstance(x, (np.ndarray)) == True:
if axis is None:
# flatten the array
Nf = len(np.ndarray.flatten((x)))
else:
Nf = x.shape[axis]
mean_x = np.mean(x, axis=axis)
std_x = np.std(x, axis=axis)
if axis == 0 :
z = x - mean_x[np.newaxis, ...]
else :
z = x - mean_x[..., np.newaxis]
sk = (np.sum(z**3, axis=axis)/(Nf-1))/std_x**3
else:
print ("WARNING: type of x must be ndarray")
sk = None
# test if ku is an array with a single value
if (isinstance(sk, (np.ndarray)) == True) and (len(sk) == 1):
sk = float(sk)
return sk
#%%
def kurtosis (x, axis=None):
"""
Compute the kurtosis (tailedness or curved or arching) of an audio signal.
Parameters
----------
x : ndarray of floats
1d signal or 2d matrix
axis : integer, optional, default is None
select the axis to compute the kurtosis
The default is to compute the mean of the flattened array.
Returns
-------
ku : float or ndarray of floats
kurtosis of x
if x is a 1d vector => single value
if x is a 2d matrix => array of values corresponding to the number of
points in the other axis
Examples
--------
>>> from maad import sound, util
>>> s, fs = sound.load('../data/spinetail.wav')
>>> util.kurtosis(s)
24.711610834321217
"""
if isinstance(x, (np.ndarray)) == True:
if axis is None:
# flatten the array
Nf = len(np.ndarray.flatten((x)))
else:
Nf = x.shape[axis]
mean_x = np.mean(x, axis=axis)
std_x = np.std(x, axis=axis)
if axis==0 :
z = x - mean_x[np.newaxis, ...]
else:
z = x - mean_x[..., np.newaxis]
ku = (np.sum(z**4, axis=axis)/(Nf-1))/std_x**4
else:
print ("WARNING: type of x must be ndarray")
ku = None
# test if ku is an array with a single value
if (isinstance(ku, (np.ndarray)) == True) and (len(ku) == 1):
ku = float(ku)
return ku
#%%
def moments (X, axis=None):
"""
    Computes the first four moments of a vector (1d, ie. spectrum or waveform)
or spectrogram (2d)
- mean
- variance
- skewness
- kurtosis
Parameters
----------
X : ndarray of floats
vector (1d : spectrum, waveform) or matrix (2d : spectrogram).
    axis : integer, optional, default is None
if spectrogram (2d), select the axis to estimate the moments.
Returns
-------
mean : float
mean of X
var : float
variance of X
skew : float
skewness of X
kurt : float
kurtosis of X
Examples
--------
>>> from maad import sound, util
>>> s, fs = sound.load('../data/spinetail.wav')
>>> mean, var, skew, kurt = util.moments(s)
>>> print ('mean:%2.4f / var:%2.4f / skew:%2.4f / kurt:%2.4f' %(mean, var, skew, kurt))
mean:-0.0000 / var:0.0012 / skew:-0.0065 / kurt:24.7116
"""
# force P to be ndarray
X = np.asarray(X)
return mean(X, axis), var(X, axis), skewness(X, axis), kurtosis(X, axis)
#%%
def entropy (x, axis=0):
"""
Compute the entropy of a vector (waveform) or matrix (spectrogram).
Parameters
----------
x : ndarray of floats
x is a vector (1d) or a matrix (2d)
axis : int, optional, default is 0
select the axis where the entropy is computed
if x is a vector, axis=0
if x is a 2d ndarray, axis=0 => rows, axis=1 => columns
Returns
-------
H : float or ndarray of floats
entropy of x
Examples
--------
>>> from maad import sound, util
>>> s, fs = sound.load('../data/spinetail.wav')
>>> H = util.entropy(s)
>>> print ('Entropy is %2.4f' %H)
Entropy is 0.9998
"""
if isinstance(x, (np.ndarray)) == True:
if x.ndim > axis:
if x.shape[axis] == 0:
print ("WARNING: x is empty")
H = None
elif x.shape[axis] == 1:
H = 0 # null entropy
elif x.all() == 0:
if x.ndim == 1 : # case vector
H = 0 # null entropy
else : # case matrix
if axis == 0 : H = np.zeros(x.shape[1]) # null entropy
if axis == 1 : H = np.zeros(x.shape[0]) # null entropy
else:
                # if x contains negative values -> rescale the signal
                # to positive values (for example (0,1))
if np.min(x)<0:
x = linear_scale(x,minval=0,maxval=1)
# length of datain along axis
n = x.shape[axis]
# Tranform the signal into a Probability mass function (pmf)
# Sum(pmf) = 1
if axis == 0 :
pmf = x/np.sum(x,axis)
elif axis == 1 :
pmf = (x.transpose()/np.sum(x,axis)).transpose()
pmf[pmf==0] = _MIN_
#normalized by the length : H=>[0,1]
H = -np.sum(pmf*np.log(pmf),axis)/np.log(n)
else:
print ("WARNING :axis is greater than the dimension of the array")
H = None
else:
print ("WARNING: x must be ndarray")
H = None
return H
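
# Illustrative, numpy-only sanity check (a self-contained complement to the
# wav-based docstring example above): a flat distribution has maximal
# normalized entropy, so
#
#   >>> entropy(np.ones(100))
#   1.0   (up to floating-point rounding)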
|
the-stack_0_26903
|
from collections import defaultdict
from urllib.parse import urlparse
import dvc.output as output
from dvc.dependency.azure import AzureDependency
from dvc.dependency.gs import GSDependency
from dvc.dependency.hdfs import HDFSDependency
from dvc.dependency.http import HTTPDependency
from dvc.dependency.https import HTTPSDependency
from dvc.dependency.local import LocalDependency
from dvc.dependency.param import ParamsDependency
from dvc.dependency.s3 import S3Dependency
from dvc.dependency.ssh import SSHDependency
from dvc.output.base import BaseOutput
from dvc.remote import Remote
from dvc.scheme import Schemes
from .repo import RepoDependency
DEPS = [
AzureDependency,
GSDependency,
HDFSDependency,
HTTPDependency,
HTTPSDependency,
S3Dependency,
SSHDependency,
# NOTE: LocalDependency is the default choice
]
DEP_MAP = {
Schemes.LOCAL: LocalDependency,
Schemes.SSH: SSHDependency,
Schemes.S3: S3Dependency,
Schemes.AZURE: AzureDependency,
Schemes.GS: GSDependency,
Schemes.HDFS: HDFSDependency,
Schemes.HTTP: HTTPDependency,
Schemes.HTTPS: HTTPSDependency,
}
# NOTE: schema for dependencies is basically the same as for outputs, but
# without output-specific entries like 'cache' (whether or not output is
# cached, see -o and -O flags for `dvc run`) and 'metric' (whether or not
# output is a metric file and how to parse it, see `-M` flag for `dvc run`).
SCHEMA = output.SCHEMA.copy()
del SCHEMA[BaseOutput.PARAM_CACHE]
del SCHEMA[BaseOutput.PARAM_METRIC]
SCHEMA.update(RepoDependency.REPO_SCHEMA)
SCHEMA.update(ParamsDependency.PARAM_SCHEMA)
def _get(stage, p, info):
parsed = urlparse(p) if p else None
if parsed and parsed.scheme == "remote":
remote = Remote(stage.repo, name=parsed.netloc)
return DEP_MAP[remote.scheme](stage, p, info, remote=remote)
if info and info.get(RepoDependency.PARAM_REPO):
repo = info.pop(RepoDependency.PARAM_REPO)
return RepoDependency(repo, stage, p, info)
if info and info.get(ParamsDependency.PARAM_PARAMS):
params = info.pop(ParamsDependency.PARAM_PARAMS)
return ParamsDependency(stage, p, params)
for d in DEPS:
if d.supported(p):
return d(stage, p, info)
return LocalDependency(stage, p, info)
def loadd_from(stage, d_list):
ret = []
for d in d_list:
p = d.pop(BaseOutput.PARAM_PATH, None)
ret.append(_get(stage, p, d))
return ret
def loads_from(stage, s_list, erepo=None):
assert isinstance(s_list, list)
ret = []
for s in s_list:
info = {RepoDependency.PARAM_REPO: erepo} if erepo else {}
ret.append(_get(stage, s, info))
return ret
def _merge_params(s_list):
d = defaultdict(list)
default_file = ParamsDependency.DEFAULT_PARAMS_FILE
for key in s_list:
if isinstance(key, str):
d[default_file].append(key)
continue
if not isinstance(key, dict):
msg = "Only list of str/dict is supported. Got: "
msg += f"'{type(key).__name__}'."
raise ValueError(msg)
for k, params in key.items():
if not isinstance(params, list):
msg = "Expected list of params for custom params file "
msg += f"'{k}', got '{type(params).__name__}'."
raise ValueError(msg)
d[k].extend(params)
return d
def loads_params(stage, s_list):
d = _merge_params(s_list)
return [
ParamsDependency(stage, path, params) for path, params in d.items()
]
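
# Illustrative sketch (not part of dvc itself): _merge_params() normalizes the
# mixed str/dict form allowed for params into a {params_file: [param, ...]}
# mapping (a defaultdict). Assuming ParamsDependency.DEFAULT_PARAMS_FILE is
# "params.yaml":
#
#   _merge_params(["lr", {"config.json": ["train.epochs"]}])
#   # -> {"params.yaml": ["lr"], "config.json": ["train.epochs"]}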
|
the-stack_0_26906
|
import typing
import re
from pydantic import BaseModel
from better_graph.utils.base_model import InputModel, OutputModel
from better_graph.utils.check_type import check_instance
from better_graph.utils.str_converter import StringConverter as sc, StringConverter
class TypingParser:
"""
TODO: // NOT FOR NOW //
Implementation of __NODE__type
// NOT FOR NOW //
"""
custom_types: typing.Dict = dict()
@classmethod
def parse(
cls,
value: typing.Union[str, dict],
key: str = None,
make_optional: bool = False,
is_query_input: bool = None
) -> typing.Union[typing.Type, str]:
if isinstance(value, dict):
if make_optional:
return typing.Optional[cls._parse_nested_dict(
value, key=key, make_optional=make_optional, is_query_input=is_query_input
)]
return cls._parse_nested_dict(value, key)
check_instance(value, str)
if is_query_input:
return typing.Optional[typing.Union[str, int]]
if make_optional:
if not isinstance(make_optional, bool):
raise TypeError('make_optional must be type: bool')
return eval(cls._make_str_optional(cls._parse_str(value)))
return eval(cls._parse_str(value))
@classmethod
def parse_fields(
cls,
fields: typing.Dict,
excluded_fields: typing.List[str],
make_optional: bool = False,
is_query_input: bool = False
) -> dict:
return {
k: TypingParser.parse(v, key=k, make_optional=make_optional, is_query_input=is_query_input) \
for k, v in fields.items() \
if k not in excluded_fields
}
@classmethod
def get_class_model(
cls,
name: str,
fields: dict,
excluded_fields: list,
make_optional: bool = False,
is_input: bool = False,
is_query: bool = False
):
is_query_input = (is_query and is_input)
fields_ = cls.parse_fields(
fields=fields,
excluded_fields=excluded_fields,
make_optional=make_optional,
is_query_input=is_query_input
)
return type(
StringConverter.snake_to_pascal(name),
(OutputModel if not is_input else InputModel,),
{
'__annotations__': fields_
}
)
@classmethod
def _parse_nested_dict(
cls,
dict_: dict,
key: str,
make_optional: bool = False,
            is_query_input: bool = None
):
nested_name = '{}Model'.format(sc.snake_to_pascal(key))
nested_fields = {
k: cls.parse(v, key=k, make_optional=make_optional, is_query_input=is_query_input) for k, v in dict_.items()
}
return type(
nested_name,
(BaseModel,),
{'__annotations__': nested_fields}
)
@classmethod
def _make_str_optional(cls, str_: str) -> str:
if not isinstance(str_, str):
raise TypeError('Please provide an argument of type: str')
return 'typing.Optional[{}]'.format(str_)
@classmethod
def _parse_str(cls, str_: str):
list_ = re.sub("[0-9a-zA-Z]", " ", str_).split(' ') # Remove all alphanumerical
pre_format = '{}' + '{}'.join([s for s in list_ if s])
list__ = re.sub("[^0-9a-zA-Z]+", '_', str_).split('_') # Remove non-alphanumerical
sanitized_ = [cls._str_to_type(s) for s in list__ if s]
return pre_format.format(*sanitized_)
@classmethod
def _str_to_type(cls, str_: str):
builtins = {
'int': int,
'float': float,
'str': str,
'list': list,
'dict': dict,
'bool': bool
}
if str_ == 'None' or str_ == 'NoneType':
return None
if str_ in builtins:
return str_
else:
if str_ in typing.__dict__:
return typing.__dict__[str_]
else:
raise TypeError('invalid type of element: {}'.format(str(str_)))
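
# Illustrative usage sketch (assumes this module's imports resolve; the results
# shown are what the parsing logic above produces, not an official contract):
#
#   TypingParser.parse('List[int]')                  # -> typing.List[int]
#   TypingParser.parse('Dict[str, int]')             # -> typing.Dict[str, int]
#   TypingParser.parse('str', make_optional=True)    # -> typing.Optional[str]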
|
the-stack_0_26907
|
"""Utilities for parsing document handles and series information.
"""
__all__ = ('SERIES', 'DOCUMENT_HANDLE_PATTERN')
import re
SERIES = {
'LPM': 'LSST Project Management',
'LSE': 'LSST Systems Engineering',
'LDM': 'LSST Data Management',
'DMTR': 'LSST DM Test Report',
'SQR': 'SQuaRE Technical Note',
'DMTN': 'Data Management Technical Note',
'SMTN': 'Simulations Technical Note',
'PSTN': 'Project Science Team Technical Note',
'SITCOMTN': 'Systems Integration, Testing, and Commissioning Technical '
'Note',
'OPSTN': 'LSST Operations Technical Note',
'TSTN': 'LSST Telescope & Site Technical Note',
}
"""Mapping between LSST document series (handle prefixes) and the title of the
series.
"""
DOCUMENT_HANDLE_PATTERN = re.compile(
r'^(?P<series>' + '|'.join([h for h in SERIES]) + ')'
r'-(?P<serial>\d+)',
re.IGNORECASE)
"""Pattern that matches the handle of any LSST document.
Notes
-----
The pattern exposes two named groups in the match object:
- ``'series'``. The document series. For example, ``'LDM'``.
- ``'serial'``. The serial number, as a `str`. For example, ``'151'``.
Note that the pattern is **case insensitive.** If your input text is normalized
to lower case, it will still match, but the series will be in lower case.
Examples
--------
>>> m = DOCUMENT_HANDLE_PATTERN.match('LDM-151'.lower())
>>> m.group('series')
'ldm'
>>> m.group('serial')
'151'
"""
|
the-stack_0_26911
|
from unittest import mock
from django.apps import apps
from django.test import TestCase
from django.test.utils import modify_settings
from django.urls import path
from django.views.generic import View
@modify_settings(INSTALLED_APPS={
'append': 'tests._site.apps.myapp.apps.TestConfig',
})
class OscarConfigTestCase(TestCase):
def setUp(self):
self.myapp = apps.get_app_config('myapp')
def test_get_permissions_required_uses_map(self):
perms = self.myapp.get_permissions('index')
self.assertEqual(perms, 'is_staff')
def test_permissions_required_falls_back_to_default(self):
perms = self.myapp.get_permissions('notinmap')
self.assertEqual(perms, 'is_superuser')
@mock.patch('oscar.views.decorators.permissions_required')
def test_get_url_decorator_fetches_correct_perms(self, mock_permissions_required):
pattern = path('', View.as_view(), name='index')
self.myapp.get_url_decorator(pattern)
mock_permissions_required.assert_called_once_with('is_staff', login_url=None)
def test_post_process_urls_adds_decorator(self):
fake_decorator = mock.Mock()
fake_decorator.return_value = 'fake_callback'
self.myapp.get_url_decorator = mock.Mock()
self.myapp.get_url_decorator.return_value = fake_decorator
pattern = path('', View.as_view(), name='index')
processed_patterns = self.myapp.post_process_urls([pattern])
self.myapp.get_url_decorator.assert_called_once_with(pattern)
self.assertEqual(processed_patterns[0].callback, 'fake_callback')
|
the-stack_0_26914
|
#!/usr/bin/python
import numpy as np
import os
from functools import partial
import json
import argparse
from sklearn.model_selection import train_test_split
try:
import bnnbench
except:
import sys
sys.path.append(os.path.expandvars('$BNNBENCHPATH'))
import bnnbench.utils.data_utils
from bnnbench.models import MLP, MCDropout, MCBatchNorm, DNGO, DeepEnsemble
from bnnbench.config import globalConfig as conf
from bnnbench import _log as bnnbench_logger
from bnnbench.toy_functions import parameterisedObjectiveFunctions, nonParameterisedObjectiveFunctions, SamplingMethods
from bnnbench.toy_functions.toy_1d import ObjectiveFunction1D
from bnnbench.toy_functions.sampler import sample_1d_func
from bnnbench.utils.attrDict import AttrDict
import bnnbench.utils.universal_utils as utils
json_config_keys = utils.config_top_level_keys
# json_config_keys = AttrDict()
# json_config_keys.obj_func = "objective_function"
# json_config_keys.dataset_size = "dataset_size"
# json_config_keys.test_frac = "testset_fraction"
# json_config_keys.mparams = "model_parameters"
# json_config_keys.eparams = "experiment_parameters"
model_types = AttrDict()
model_types.mlp = MLP
model_types.mcdropout = MCDropout
model_types.mcbatchnorm = MCBatchNorm
model_types.dngo = DNGO
model_types.ensemble = DeepEnsemble
# ----------------------------------------------------------------------------------------------------------------------
# --------------------------------------Set up default experiment parameters--------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
config = AttrDict()
config.OBJECTIVE_FUNC = nonParameterisedObjectiveFunctions.infinityGO7
config.DATASET_SIZE = 100
config.TEST_FRACTION = 0.2
config.model_params = {}
config.exp_params = {}
config.mtype = MLP
# ----------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------Check command line args--------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def handle_cli():
print("Handling command line arguments.")
# global config
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('--model', type=str, default='mlp',
help='Case-insensitive string indicating the type of model to be used for this experiment. '
'Valid options are: ["mlp", "mcdropout", "mcbatchnorm", "ensemble", "dngo"]')
parser.add_argument('--config', type=str, default=None,
help='Filename of JSON file containing experiment configuration. If not provided, default '
'configurations are used.')
for argument, helptext in conf.cli_arguments.items():
argname = '--' + argument
defaultval = conf.defaults[argument]
if type(defaultval) is bool:
action = "store_false" if defaultval else "store_true"
elif isinstance(defaultval, str):
action = "store"
parser.add_argument(argname, default=None, action=action, help=helptext)
parser.add_argument('--plotdata', action='store_true', default=False, required=False,
help='When given, generates a plot of the training/test data. Only supported for 1D '
'datasets.')
parser.add_argument('--summarize', action='store_true', default=False, required=False,
help='When given, generates a summary of the network generated by the model using '
'torchsummary.')
args = parser.parse_args()
config.plotdata = args.plotdata
config.summarize = args.summarize
mtype = str.lower(args.model)
if mtype not in model_types:
raise RuntimeError("Unknown model type %s specified." % mtype)
else:
config.mtype = model_types[mtype]
default_model_params = model_types[mtype]._default_model_params._asdict()
if args.config is not None:
print("--config flag detected.")
config_file_path = utils.standard_pathcheck(args.config)
with open(config_file_path, 'r') as fp:
new_config = json.load(fp)
if json_config_keys.obj_func in new_config:
print("Attempting to fetch objective function %s" % new_config[json_config_keys.obj_func])
if isinstance(new_config[json_config_keys.obj_func], dict):
raise RuntimeError("This script no longer supports datasets as objective functions and until further "
"notice, can only be used for toy functions specified using the old interface by "
"specifying the complete toy function name as defined in bnnbench.toy_functions.toy_1d "
"as a string value for the top-level key %s in the JSON config file." %
json_config_keys.obj_func)
# utils.parse_objective(config=new_config[json_config_keys.obj_func], out=config)
else:
from bnnbench.toy_functions.toy_1d import get_func_from_attrdict
config.OBJECTIVE_FUNC = get_func_from_attrdict(new_config[json_config_keys.obj_func],
nonParameterisedObjectiveFunctions)
print("Fetched objective function.")
if json_config_keys.dataset_size in new_config:
config.DATASET_SIZE = int(new_config[json_config_keys.dataset_size])
print("Using dataset size %d provided by config file." % config.DATASET_SIZE)
if json_config_keys.test_frac in new_config:
config.TEST_FRACTION = float(new_config[json_config_keys.test_frac])
print("Using test set fraction %.3f provided by config file." % config.TEST_FRACTION)
if json_config_keys.mparams in new_config:
config_model_params = new_config[json_config_keys.mparams]
print("Using model parameters provided by config file.")
for key, val in default_model_params.items():
config.model_params[key] = val if config_model_params.get(key, None) is None else \
config_model_params[key]
print("Final model parameters: %s" % config.model_params)
if json_config_keys.eparams in new_config:
print("Using experiment parameters provided by config file.")
config_exp_params = new_config[json_config_keys.eparams]
for key, val in conf.defaults.items():
if key in conf.cli_arguments:
# Only handle those settings that can be modified using the CLI or JSON config file.
# Priorities: 1. CLI, 2. Config file, 3. Defaults
clival = getattr(args, key)
config.exp_params[key] = clival if clival is not None else val if \
config_exp_params.get(key, None) is None else config_exp_params[key]
print("Final experiment parameters: %s" % config.exp_params)
else:
print("No config file detected, using default parameters.")
config.model_params = default_model_params
config.exp_params = config.default_exp_params
print("Finished reading command line arguments.")
def perform_experiment():
model = config.mtype(model_params=config.model_params)
# if config.exp_params['tbdir'] is None:
if config.exp_params.get('tbdir', None) in [None, '']:
config.exp_params['tbdir'] = model.modeldir
print(f"Tensorboard directory set to: {config.exp_params['tbdir']}")
conf.params = config.exp_params
rng: np.random.RandomState = model.rng
mean_only = True if config.mtype is model_types.mlp else False
print("Saving new model to: %s" % config.model_params["model_path"])
# -----------------------------------------------Generate data------------------------------------------------------
if isinstance(config.OBJECTIVE_FUNC, AttrDict):
X, y = bnnbench.utils.data_utils.data_generator(config.OBJECTIVE_FUNC)
print(f"Loaded dataset with feature set of shape {X.shape} and targets of shape {y.shape}")
plotting1d = False
else:
X, y = sample_1d_func(config.OBJECTIVE_FUNC, rng=rng, nsamples=config.DATASET_SIZE,
method=SamplingMethods.RANDOM)
plotting1d = True
# I give up. New rule: No more vectors.
if len(X.shape) == 1:
X = X[:, None]
if len(y.shape) == 1:
y = y[:, None]
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=config.TEST_FRACTION, random_state=rng,
shuffle=True)
print(f"Shapes after splitting: X, y - {Xtrain.shape}, {ytrain.shape}")
# -----------------------------------------------Set up plotting----------------------------------------------------
if plotting1d:
domain = config.OBJECTIVE_FUNC.domain
grid = np.linspace(domain[0], domain[1], max(1000, config.DATASET_SIZE * 10))
fvals = config.OBJECTIVE_FUNC(grid)
tb_plotter = partial(utils.network_output_plotter_toy, grid=grid, fvals=fvals, trainx=Xtrain, trainy=ytrain,
plot_variances=not mean_only)
# -------------------------------------------------Let it roll------------------------------------------------------
model.preprocess_training_data(Xtrain, ytrain)
if plotting1d:
model.train_network(plotter=tb_plotter)
else:
model.train_network() # Don't save interim progress plots
predicted_y = model.predict(Xtest)
savedir = utils.ensure_path_exists(model.modeldir)
if mean_only:
# out = np.zeros((Xtest.shape[0], Xtest.shape[1] + 1))
out = np.concatenate((Xtest, predicted_y), axis=1)
else:
# Assume the model predicted means and variances, returned as a tuple
# Treat both elements of the tuple as individual numpy arrays
out = np.concatenate((Xtest, predicted_y[0], predicted_y[1]), axis=1)
print(f"Saving model performance results in {savedir}")
if config.plotdata:
from bnnbench.utils.universal_utils import simple_plotter
import matplotlib.pyplot as plt
traindata = np.concatenate((Xtrain, ytrain), axis=1)
testdata = np.concatenate((Xtest, ytest), axis=1)
print(f"Displaying:\nTraining data of shape {traindata.shape}\nTest data of shape {testdata.shape}\n"
f"Prediction data of shape {out.shape}")
fig = simple_plotter(
pred=out,
train=traindata,
test=testdata,
plot_variances=not mean_only
)
plt.show()
np.save(file=os.path.join(savedir, 'trainset'), arr=np.concatenate((Xtrain, ytrain), axis=1), allow_pickle=True)
np.save(file=os.path.join(savedir, 'testset'), arr=np.concatenate((Xtest, ytest), axis=1), allow_pickle=True)
np.save(file=os.path.join(savedir, 'test_predictions'), arr=out, allow_pickle=True)
utils.make_model_params_json_compatible(config.model_params)
utils.make_exp_params_json_compatible(config.exp_params)
jdict = {
json_config_keys.obj_func: str(config.OBJECTIVE_FUNC),
json_config_keys.dataset_size: config.DATASET_SIZE,
json_config_keys.test_frac: config.TEST_FRACTION,
json_config_keys.mparams: config.model_params,
json_config_keys.eparams: config.exp_params
}
with open(os.path.join(savedir, 'config.json'), 'w') as fp:
try:
json.dump(jdict, fp, indent=4)
except TypeError as e:
print("Could not write configuration file for config:\n%s" % jdict)
print("Finished experiment.")
if config.summarize:
model.network.to('cuda')
from torchsummary import summary
summary(model.network, input_size=(model.batch_size, model.input_dims))
if __name__ == '__main__':
handle_cli()
perform_experiment()
|
the-stack_0_26916
|
#!/usr/bin/env python3
import os
resources_dir = '../../Assets/Resources/'
out_filename = os.path.join(resources_dir, 'LocalizedResourcePaths.txt')
with open(out_filename, 'w', encoding='utf-8', newline='\n') as f:
for root, dirs, files in os.walk(
os.path.join(resources_dir, 'LocalizedResources')):
for file in sorted(files):
if file.endswith('.meta'):
continue
filename = os.path.join(root, os.path.splitext(file)[0])
filename = filename.replace('\\', '/')
            filename = filename.replace(resources_dir, '')  # resources_dir already ends with '/'
print(filename)
f.write(filename + '\n')
|
the-stack_0_26918
|
class Solution:
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
x={}
l=0
r=0
max_len=1
if len(s)*k==0:
return 0
while r<len(s):
x[s[r]]=r
r+=1
if len(set(x))>k:
del_idx = min(x.values())
del x[s[del_idx]]
l = del_idx + 1
max_len = max(max_len, r - l)
return max_len
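
# Illustrative check with an assumed input (not part of the original submission):
# the longest substring of "eceba" with at most 2 distinct characters is "ece",
# so the sliding window above returns 3.
#
#   Solution().lengthOfLongestSubstringKDistinct("eceba", 2)  # -> 3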
|
the-stack_0_26919
|
# coding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import pandas as pd
import tushare as QATs
from QUANTAXIS.QAUtil import (QA_util_date_int2str, QA_util_date_stamp,
QA_util_log_info, QA_util_to_json_from_pandas)
def QA_fetch_get_stock_day(name, start='', end='', if_fq='01', type_='pd'):
if (len(name) != 6):
name = str(name)[0:6]
if str(if_fq) in ['qfq', '01']:
if_fq = 'qfq'
elif str(if_fq) in ['hfq', '02']:
if_fq = 'hfq'
elif str(if_fq) in ['bfq', '00']:
if_fq = 'bfq'
else:
QA_util_log_info('wrong with fq_factor! using qfq')
if_fq = 'qfq'
data = QATs.get_k_data(str(name), start, end, ktype='D', autype=if_fq, retry_count=200, pause=0.005).sort_index()
data['date_stamp'] = data['date'].apply(lambda x: QA_util_date_stamp(x))
data['fqtype'] = if_fq
if type_ in ['json']:
data_json = QA_util_to_json_from_pandas(data)
return data_json
elif type_ in ['pd', 'pandas', 'p']:
data['date'] = pd.to_datetime(data['date'])
data = data.set_index('date', drop=False)
data['date'] = data['date'].apply(lambda x: str(x)[0:10])
return data
def QA_fetch_get_stock_realtime():
data = QATs.get_today_all()
data_json = QA_util_to_json_from_pandas(data)
return data_json
def QA_fetch_get_stock_info(name):
data = QATs.get_stock_basics()
try:
return data.loc[name]
except:
return None
def QA_fetch_get_stock_tick(name, date):
if (len(name) != 6):
name = str(name)[0:6]
return QATs.get_tick_data(name, date)
def QA_fetch_get_stock_list():
df = QATs.get_stock_basics()
return list(df.index)
def QA_fetch_get_stock_time_to_market():
data = QATs.get_stock_basics()
return data[data['timeToMarket'] != 0]['timeToMarket'].apply(lambda x: QA_util_date_int2str(x))
def QA_fetch_get_trade_date(end, exchange):
data = QATs.trade_cal()
da = data[data.isOpen > 0]
data_json = QA_util_to_json_from_pandas(data)
message = []
for i in range(0, len(data_json) - 1, 1):
date = data_json[i]['calendarDate']
num = i + 1
exchangeName = 'SSE'
data_stamp = QA_util_date_stamp(date)
mes = {'date': date, 'num': num,
'exchangeName': exchangeName, 'date_stamp': data_stamp}
message.append(mes)
return message
def QA_fetch_get_lhb(date):
return QATs.top_list(date)
# test
# print(get_stock_day("000001",'2001-01-01','2010-01-01'))
# print(get_stock_tick("000001.SZ","2017-02-21"))
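# Illustrative call (assumed symbol and dates; requires network access through
# tushare):
# QA_fetch_get_stock_day('000001', '2020-01-01', '2020-02-01', if_fq='qfq')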
|
the-stack_0_26922
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce.core import ModuleDict, parameter_modules, SignatureDict, TensorDict, TensorSpec, \
TensorsSpec, tf_function, tf_util
from tensorforce.core.policies import Policy
class StochasticPolicy(Policy):
"""
Base class for stochastic policies.
Args:
temperature (parameter | dict[parameter], float >= 0.0): Sampling temperature, global or
per action (<span style="color:#00C000"><b>default</b></span>: 1.0).
device (string): Device name
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
l2_regularization (float >= 0.0): Scalar controlling L2 regularization
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
name (string): <span style="color:#0000C0"><b>internal use</b></span>.
states_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
auxiliaries_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
actions_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
kldiv_reference_spec (specification):
<span style="color:#0000C0"><b>internal use</b></span>.
"""
def __init__(
self, *, temperature=1.0, device=None, l2_regularization=None, name=None, states_spec=None,
auxiliaries_spec=None, internals_spec=None, actions_spec=None, kldiv_reference_spec=None
):
super().__init__(
device=device, l2_regularization=l2_regularization, name=name, states_spec=states_spec,
auxiliaries_spec=auxiliaries_spec, actions_spec=actions_spec
)
self.kldiv_reference_spec = kldiv_reference_spec
# Sampling temperature
if temperature is None:
temperature = 1.0
if isinstance(temperature, dict) and all(name in self.actions_spec for name in temperature):
# Different temperature per action
def function(name, spec):
return self.submodule(
name=(name + '_temperature'), module=temperature.get(name, 0.0),
modules=parameter_modules, is_trainable=False, dtype='float', min_value=0.0
)
self.temperature = self.actions_spec.fmap(
function=function, cls=ModuleDict, with_names=True
)
else:
# Same temperature for all actions
self.temperature = self.submodule(
name='temperature', module=temperature, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
def input_signature(self, *, function):
if function == 'act_entropy':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
deterministic=TensorSpec(type='bool', shape=()).signature(batched=False)
)
elif function == 'entropy':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True)
)
elif function == 'entropies':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True)
)
elif function == 'kl_divergence':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
reference=self.distributions.fmap(
function=(lambda x: x.parameters_spec), cls=TensorsSpec
).signature(batched=True)
)
elif function == 'kl_divergences':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
reference=self.distributions.fmap(
function=(lambda x: x.parameters_spec), cls=TensorsSpec
).signature(batched=True)
)
elif function == 'kldiv_reference':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True)
)
elif function == 'log_probability':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True)
)
elif function == 'log_probabilities':
return SignatureDict(
states=self.states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True)
)
else:
return super().input_signature(function=function)
def output_signature(self, *, function):
if function == 'act_entropy':
return SignatureDict(
actions=self.actions_spec.signature(batched=True),
internals=self.internals_spec.signature(batched=True),
entropy=TensorSpec(type='float', shape=()).signature(batched=True)
)
elif function == 'entropy':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=True)
)
elif function == 'entropies':
return SignatureDict(
singleton=self.actions_spec.fmap(function=(
lambda spec: TensorSpec(type='float', shape=spec.shape).signature(batched=True)
), cls=SignatureDict)
)
elif function == 'kl_divergence':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=True)
)
elif function == 'kl_divergences':
return SignatureDict(
singleton=self.actions_spec.fmap(function=(
lambda spec: TensorSpec(type='float', shape=spec.shape).signature(batched=True)
), cls=SignatureDict)
)
elif function == 'kldiv_reference':
return SignatureDict(
singleton=self.kldiv_reference_spec.signature(batched=True)
)
elif function == 'log_probability':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=True)
)
elif function == 'log_probabilities':
return SignatureDict(
singleton=self.actions_spec.fmap(function=(
lambda spec: TensorSpec(type='float', shape=spec.shape).signature(batched=True)
), cls=SignatureDict)
)
else:
return super().output_signature(function=function)
@tf_function(num_args=5)
def log_probability(self, *, states, horizons, internals, auxiliaries, actions):
log_probabilities = self.log_probabilities(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
actions=actions
)
def function(value, spec):
return tf.reshape(tensor=value, shape=(-1, spec.size))
log_probabilities = log_probabilities.fmap(function=function, zip_values=self.actions_spec)
log_probabilities = tf.concat(values=tuple(log_probabilities.values()), axis=1)
return tf.math.reduce_sum(input_tensor=log_probabilities, axis=1)
@tf_function(num_args=4)
def entropy(self, *, states, horizons, internals, auxiliaries):
entropies = self.entropies(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
def function(value, spec):
return tf.reshape(tensor=value, shape=(-1, spec.size))
# See also implementation of ParametrizedDistributions.act_entropy()
entropies = entropies.fmap(function=function, zip_values=self.actions_spec)
entropies = tf.concat(values=tuple(entropies.values()), axis=1)
return tf.math.reduce_mean(input_tensor=entropies, axis=1)
@tf_function(num_args=5)
def kl_divergence(self, *, states, horizons, internals, auxiliaries, reference):
kl_divergences = self.kl_divergences(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
def function(value, spec):
return tf.reshape(tensor=value, shape=(-1, spec.size))
kl_divergences = kl_divergences.fmap(function=function, zip_values=self.actions_spec)
kl_divergences = tf.concat(values=tuple(kl_divergences.values()), axis=1)
return tf.math.reduce_mean(input_tensor=kl_divergences, axis=1)
@tf_function(num_args=5)
def act(self, *, states, horizons, internals, auxiliaries, deterministic, independent):
raise NotImplementedError
@tf_function(num_args=5)
def act_entropy(self, *, states, horizons, internals, auxiliaries, deterministic, independent):
raise NotImplementedError
@tf_function(num_args=4)
def entropies(self, *, states, horizons, internals, auxiliaries):
raise NotImplementedError
@tf_function(num_args=5)
def kl_divergences(self, *, states, horizons, internals, auxiliaries, reference):
raise NotImplementedError
@tf_function(num_args=4)
def kldiv_reference(self, *, states, horizons, internals, auxiliaries):
raise NotImplementedError
@tf_function(num_args=5)
def log_probabilities(self, *, states, horizons, internals, auxiliaries, actions):
raise NotImplementedError
|
the-stack_0_26923
|
#!/usr/bin/env python
"""
Read in YAML file with software management plan advice and guidance
and print it out in Markdown.
usage: python yaml_to_markdown.py [-h] [-f FILE] [-t TYPE]
optional arguments:
-h, --help show this help message and exit
-f FILE, --file FILE YAML configuration file
-t TYPE, --type TYPE Document type
('paper' | 'template' | 'markdown-template')
The YAML file must hold a single document. The document must be
structured as follows:
---
metadata:
title: Title.
author: Author.
date-meta: Data for HTML meta-data tag.
citation-date: Human-readable date.
version: Version number.
doi: DOI of document being produced.
website: URL of web site associated with document.
keywords: list of keywords.
licence: licence information.
licence-tag: licence tag, from SPDX, https://spdx.org/licenses/.
changelog:
# Record only notes for current version as date, version, doi inferred
# from metadata above.
- notes: Notes about current version.
- version: Previous version number.
doi: Previous version's DOI.
date: Previous version's publication date.
notes: Notes about previous version.
- ...
intro:
- Introductory text.
- ...
usage: Usage conditions.
acks: Acknowledgements.
sections:
- section: Section name e.g. About your software
consider:
- A sub-question to consider.
- Another sub-question to consider.
guidance:
- Some guidance.
- Each entry corresponds to a paragraph.
- Bulleted list entry within guidance.
- Bulleted list entry within guidance.
- Some more guidance.
- section: Another section name.
...
The following constraints hold for each field:
* metadata: 1
* title: 1
* author: 1
* date-meta: 1
* citation-date: 1
* version: 1
* doi: 1
* website: 1
* keywords: 0+
* licence: 1
* licence-tag: 1
* changelog: 1, with 1+ entries.
* notes: 1 per changelog entry.
* intro: 1, with 1+ entries.
* version: 1 for all but first changelog entry.
* doi: 1 for all but first changelog entry.
* date: 1 for all but first changelog entry.
* sections: 1
* section: 0+
* consider: 0 or 1 per section. If provided then its sequence must
have 1+ entries.
* guidance: 0 or 1 per section. If provided then its sequence must
have 1+ entries.
* Colons, :, in text should be surrounded by text or, for a lead-in
to a list use a ",". Otherwise YAML will interpret the colon as
its delimiter. As an example:
- guidance:
- Examples of this approach include:one, two, three
- Other examples include,
-
- four.
- five.
"""
from argparse import ArgumentParser
import yaml
METADATA = "metadata"
DATE = "date"
DATEMETA = "date-meta"
DOI = "doi"
TITLE = "title"
VERSION = "version"
INTRO = "intro"
USAGE = "usage"
ACKS = "acks"
CHANGELOG = "changelog"
NOTES = "notes"
SECTIONS = "sections"
SECTION = "section"
CONSIDER = "consider"
GUIDANCE = "guidance"
PAPER = "paper"
MARKDOWN_TEMPLATE = "markdown-template"
TEMPLATE = "template"
def read_file(file_name):
"""
Read YAML file and return contents.
:param file_name: file name
:type file_name: str or unicode
:return: document
:rtype: dict
"""
document = None
with open(file_name, "r") as stream:
document = yaml.load(stream)
return document
def write_paper(document):
"""
Write out software management plan paper as Markdown.
:param document: software management plan
:type document: dict
"""
print("---")
for (key, value) in list(document[METADATA].items()):
print((key + ": " + str(value)))
print("---\n")
print("## Einleitung\n")
for intro in document[INTRO]:
print((intro + "\n"))
print("## Benutzung dieser Checkliste\n")
print((document[USAGE] + "\n"))
print("## Danksagung\n")
print((document[ACKS] + "\n"))
print("## Änderungen\n")
changes = document[CHANGELOG]
change = changes[0]
print(("* " + str(document[METADATA][VERSION]) + " (" +
str(document[METADATA][DATEMETA]) + ") " +
change[NOTES] + " " +
"doi:" + document[METADATA][DOI]))
for change in changes[1:]:
print(("* " + str(change[VERSION]) + " (" +
str(change[DATE]) + ") " +
change[NOTES] + " " +
"doi:" + change[DOI]))
print("\n")
write_paper_body(document[SECTIONS])
def write_paper_body(sections):
"""
Write out software management plan paper body as Markdown.
Process given list of dictionaries, each corresponding to a single
section of a software management plan and output these as
Markdown.
* Each section title is represented as a level 2 heading.
* Each sections's questions to consider and guidance are
represented as bulleted lists.
:param sections: sections
:type sections: list of dict
"""
for section in sections:
print(("## " + section[SECTION] + "\n"))
if CONSIDER in list(section.keys()):
print("**Zu berücksichtigende Fragen:**\n")
for consider in section[CONSIDER]:
print(("* " + consider))
print("")
if GUIDANCE in list(section.keys()):
print(("**Anleitung:**\n"))
for guidance in section[GUIDANCE]:
if isinstance(guidance, list):
for element in guidance:
print(("* " + element))
print("")
else:
print((guidance + "\n"))
def write_template(document):
"""
Write out software management plan template as Markdown.
This Markdown is intended to be used as an intermediary before
onward conversion into another format e.g. to docx, odt or
html, using, for example, pandoc.
The Markdown starts with:
---
title: PROJECT-NAME Software Management Plan
---
:param document: software management plan
:type document: dict
"""
print("---")
print((TITLE + ": PROJECT-NAME Software Management Plan"))
print("---\n")
write_template_body(document[SECTIONS])
def write_template_body(sections):
"""
Write out software management plan template body as Markdown.
Process given list of dictionaries, each corresponding to a single
section of a software management plan and output these as
Markdown.
* Each section title is represented as a level 2 heading.
* Each section's questions to consider are represented as
plain text on separate lines.
This Markdown is intended to be used as an intermediary before
onward conversion into another format e.g. to docx, odt or
html, using, for example, pandoc.
:param sections: sections
:type sections: list of dict
"""
for section in sections:
print(("## " + section[SECTION] + "\n"))
if CONSIDER in list(section.keys()):
for consider in section[CONSIDER]:
print((consider + "\n"))
else:
# Insert non-breaking spaces into Markdown so that
# they are not ignored during downstream conversion.
print(" \n\n \n\n \n\n")
def write_markdown_template(document):
"""
Write out software management plan template as Markdown.
This Markdown is intended to be used as a standalone
Markdown document.
:param document: software management plan
:type document: dict
"""
print(("# PROJECT-NAME Software Management Plan\n"))
write_markdown_template_body(document[SECTIONS])
def write_markdown_template_body(sections):
"""
Write out software management plan template body as Markdown.
Process given list of dictionaries, each corresponding to a single
section of a software management plan and output these as
Markdown.
* Each section title is represented as a level 2 heading.
* Each section's questions to consider are represented as
a bulleted list on separate lines, embedded within a block
quote.
:param sections: sections
:type sections: list of dict
"""
for section in sections:
print(("## " + section[SECTION] + "\n"))
if CONSIDER in list(section.keys()):
for consider in section[CONSIDER]:
print(("> * " + consider))
else:
print("> ...")
print("")
def parse_command_line_arguments():
"""
Parse command-line arguments, printing usage information if there
are any problems.
:return: command-line arguments
:rtype: argparse.Namespace
"""
parser = ArgumentParser("python yaml_to_markdown.py")
parser.add_argument("-f", "--file",
dest="file",
help="YAML configuration file")
parser.add_argument("-t", "--type",
default=PAPER,
dest="type",
help="Document type ('paper' | 'template')")
args = parser.parse_args()
if not args.file:
parser.error("Missing file name")
return args
def yaml_to_markdown(args):
"""
Process YAML file and output desired file.
:param args: command-line arguments.
:type args: argparse.Namespace
"""
file_name = args.file
doc_type = args.type
document = read_file(file_name)
if doc_type == TEMPLATE:
write_template(document)
elif doc_type == MARKDOWN_TEMPLATE:
write_markdown_template(document)
else:
write_paper(document)
if __name__ == '__main__':
command_line_args = parse_command_line_arguments()
yaml_to_markdown(command_line_args)
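
# Example invocations (hypothetical YAML file name):
#   python yaml_to_markdown.py -f smp.yaml > paper.md
#   python yaml_to_markdown.py -f smp.yaml -t markdown-template > template.md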
|
the-stack_0_26925
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .layernorm import BertLayerNorm
import torch
from torch import nn
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
position_ids = self.get_position_ids(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def get_position_ids(self, input_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
return position_ids
|
the-stack_0_26926
|
#!/usr/bin/env python3
import sys
import getopt
import glob
from os.path import isfile, join, dirname, split, isdir
from os import remove, walk
import math
import numpy as np
from soundfile import SoundFile
blocklenSec = 3
RMSpercentage = 20
NhighestPeak = 2
textout = True
def main(argv):
recurse = False
try:
opts, args = getopt.getopt(argv, "h", ["help"])
except getopt.GetoptError:
print("drmeter.py <audiofile or path>")
sys.exit(2)
for arg in args:
filelist = []
if isfile(arg) is True:
try:
SoundFile(arg)
except RuntimeError:
continue
filelist.append(arg)
elif isdir(arg) is True:
for root, dirs, files in walk(arg):
for file in files:
try:
SoundFile(join(root, file))
except RuntimeError:
continue
filelist.append(join(root, file))
else:
print("Ignoring '{0}' (not a file or folder)".format(arg))
continue
if len(filelist) == 0:
raise IOError("No matching files have been found")
if textout is True:
path = dirname(filelist[0])
textpath = join(path, "{0}_dr.txt".format(split(path)[-1]))
try:
f = open(textpath, "x")
except FileExistsError:
remove(textpath)
f = open(textpath, "x")
linewidth = 75
dashedline = "".join(["-"[:5]] * linewidth)
if len(path) > linewidth - 18:
path = split(path)[-1]
f.write(dashedline + "\n")
f.write(" Analyzed folder: {0}\n".format(path))
f.write(dashedline + "\n")
f.write(" DR Peak RMS Filename\n")
f.write(dashedline + "\n\n")
f.close()
idx = 0
for nfile in filelist:
# try:
DR, Peak, RMS = calc_drscore(nfile)
# except RuntimeError:
# For unrecognizable (non-audio) files
# continue
if idx == 0:
DR_all = np.zeros((len(filelist), len(DR)))
DR_all[idx, :] = DR
else:
DR_all[idx, :] = DR
idx += 1
if textout is True:
f = open(textpath, "a")
f.write(" DR{0:02.0f} {1:+6.2f} dB {2:+6.2f} dB {3}\n"
.format(
DR.mean(),
10 * np.log10(np.power(10, Peak / 10).mean()),
10 * np.log10(np.power(10, RMS / 10).mean()),
split(nfile)[-1]))
f.close()
if textout is True:
f = open(textpath, "a")
f.write(dashedline + "\n\n")
f.write(" Number of files:\t{0:d}\n".format(len(filelist)))
f.write(" Official DR value:\tDR{0:.0f}\n\n".format(DR_all.mean()))
f.write("".join(["="[:5]] * linewidth) + "\n")
f.close()
return
def calc_drscore(filename):
data = SoundFile(filename)
NblockLen = round(blocklenSec * data.samplerate)
NblockIdx = math.ceil(data.frames / NblockLen)
Nchannels = data.channels
RMS = np.zeros((NblockIdx, Nchannels))
Pk = np.zeros((NblockIdx, Nchannels))
for nn in range(NblockIdx):
curData = np.array(data.read(NblockLen), ndmin=2)
for cc in range(Nchannels):
interim = 2 * (np.power(np.abs(curData[:, cc]), 2))
RMS[nn, cc] = math.sqrt(interim.mean())
Pk[nn, cc] = max(abs(curData[:, cc]))
iUpmostBlocks = round(NblockIdx * RMSpercentage * 0.01)
RMS.sort(axis=0)
Pk.sort(axis=0)
RMS[:] = RMS[::-1, :]
Pk[:] = Pk[::-1, :]
RMS_upmost = RMS[:iUpmostBlocks, :]
RMS_total = np.sqrt((np.power(RMS, 2)).mean(axis=0))
pre0 = np.power(RMS_upmost, 2).sum(axis=0)
pre1 = np.repeat(iUpmostBlocks, Nchannels, axis=0)
pre2 = np.sqrt(pre0 / pre1)
DR_score = Pk[NhighestPeak - 1, :] / pre2
RMS_score = RMS_total
Peak_score = Pk[0, :]
DR_score_log = 20 * np.log10(DR_score)
RMS_score_log = 20 * np.log10(RMS_score)
Peak_score_log = 20 * np.log10(Peak_score)
print()
print("DR analysis results:")
print("====================")
print(filename)
print()
print(" : ", end="")
for n in range(Nchannels):
print(" Chann {0:2d} :: ".format(n + 1), end="")
print()
print("Peak : ", end="")
for peak in Peak_score_log:
print("{0:7.2f} dB :: ".format(peak), end="")
print()
print("RMS : ", end="")
for rms in RMS_score_log:
print("{0:7.2f} dB :: ".format(rms), end="")
print()
print("DR : ", end="")
for dr in DR_score_log:
print("{0:7.2f} :: ".format(dr), end="")
print()
return DR_score_log, Peak_score_log, RMS_score_log
if __name__ == "__main__":
main(sys.argv[1:])
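
# For reference, the DR value reported above follows the usual "DR meter"
# recipe implied by the constants at the top of this file (per channel):
#
#   DR = 20 * log10( second_highest_block_peak
#                    / sqrt(mean(RMS_of_loudest_20%_blocks ** 2)) )
#
# i.e. the second-highest peak over 3-second blocks divided by the RMS of the
# loudest 20% of those blocks.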
|
the-stack_0_26927
|
import torch
import torch.nn as nn
from sota.cnn.operations import *
import sys
sys.path.insert(0, '../../')
from nasbench201.utils import drop_path
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
if reduction:
op_names, indices = zip(*genotype.reduce)
concat = genotype.reduce_concat
else:
op_names, indices = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, indices, concat, reduction)
def _compile(self, C, op_names, indices, concat, reduction):
assert len(op_names) == len(indices)
self._steps = len(op_names) // 2
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for name, index in zip(op_names, indices):
stride = 2 if reduction and index < 2 else 1
op = OPS[name](C, stride, True)
self._ops += [op]
self._indices = indices
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
h1 = states[self._indices[2 * i]]
h2 = states[self._indices[2 * i + 1]]
op1 = self._ops[2 * i]
op2 = self._ops[2 * i + 1]
h1 = op1(h1)
h2 = op2(h2)
if self.training and drop_prob > 0.:
if not isinstance(op1, Identity):
h1 = drop_path(h1, drop_prob)
if not isinstance(op2, Identity):
h2 = drop_path(h2, drop_prob)
s = h1 + h2
states += [s]
return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHead(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 8x8"""
super(AuxiliaryHead, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
# image size = 2 x 2
nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0), -1))
return x
class Network(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(Network, self).__init__()
self._layers = layers
self._auxiliary = auxiliary
stem_multiplier = 3
C_curr = stem_multiplier * C
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if i in [layers // 3, 2 * layers // 3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
if i == 2 * layers // 3:
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, num_classes)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
def forward(self, input):
logits_aux = None
s0 = s1 = self.stem(input)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
if i == 2 * self._layers // 3:
if self._auxiliary and self.training:
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), -1))
return logits, logits_aux
|
the-stack_0_26929
|
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import unittest
import keras2onnx
import numpy as np
from keras2onnx.proto import keras
from keras.regularizers import l2
from keras.initializers import TruncatedNormal
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
def f(input_):
residual = input_
layer_out = Conv1D(filters=nb_filter, kernel_size=filter_length,
dilation_rate=dilation,
activation='linear', padding='causal', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
seed=42), kernel_regularizer=l2(l2_layer_reg))(input_)
layer_out = Activation('selu')(layer_out)
skip_out = Conv1D(1, 1, activation='linear', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
seed=42), kernel_regularizer=l2(l2_layer_reg))(layer_out)
network_in = Conv1D(1, 1, activation='linear', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
seed=42), kernel_regularizer=l2(l2_layer_reg))(layer_out)
network_out = Add()([residual, network_in])
return network_out, skip_out
return f
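# Note (added for clarity): each DC_CNN_Block is a WaveNet-style unit: a dilated
# causal Conv1D followed by a selu activation, then two 1x1 convolutions that
# produce a skip-connection output and a residual branch added back to the input.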
def DC_CNN_Model(length):
input = Input(shape=(length, 1))
l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(input)
l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
l6b = Dropout(0.8)(l6b) # dropout used to limit influence of earlier data
l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
l7b = Dropout(0.8)(l7b) # dropout used to limit influence of earlier data
l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
l9 = Activation('relu')(l8)
l21 = Conv1D(1, 1, activation='linear', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
kernel_regularizer=l2(0.001))(l9)
    model = Model(inputs=input, outputs=l21)
return model
# Model from https://github.com/kristpapadopoulos/seriesnet
class TestSeriesNet(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
def test_series_net(self):
K.clear_session()
keras_model = DC_CNN_Model(20)
data = np.random.rand(2000, 20, 1).astype(np.float32)
expected = keras_model.predict(data)
onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(
run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_26930
|
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.api import TIME_FORMAT_DAY, get_str_schema
from zvt.contract.api import df_to_db
from zvt.contract.recorder import TimeSeriesDataRecorder
from zvt.domain import HolderTrading
from zvt.domain import StockDetail
from zvt.recorders.emquantapi.common import mainCallback, to_em_entity_id
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import now_pd_timestamp, to_time_str
try:
from EmQuantAPI import *
except:
pass
class HolderTradingRecorder(TimeSeriesDataRecorder):
entity_provider = 'joinquant'
entity_schema = StockDetail
    # entity data comes from joinquant (jq)
provider = 'emquantapi'
data_schema = HolderTrading
def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, real_time=False,
fix_duplicate_way='add', start_timestamp=None, end_timestamp=None, close_hour=0,
close_minute=0) -> None:
self.data_schema = get_str_schema('HolderTrading')
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
        # call the login function (usable after activation; no username/password required)
loginResult = c.start("ForceLogin=1", '', mainCallback)
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
def on_finish(self):
        # log out
loginresult = c.stop()
if (loginresult.ErrorCode != 0):
print("login in fail")
exit()
def record(self, entity, start, end, size, timestamps):
if not end:
end = to_time_str(now_pd_timestamp())
start = to_time_str(start)
em_code = to_em_entity_id(entity)
columns_list = list(self.data_schema.get_data_map(self))
data = c.ctr("HoldTradeDetailInfo", columns_list,
"secucode=" + em_code + ",StartDate=" + start + ",EndDate=" + end + ",HoldType=0")
if data.Data=={}:
return None
df = pd.DataFrame(data.Data).T
df.columns = data.Indicators
df = df.sort_values("NOTICEDATE", ascending=True)
df['TOTALSHARE'] = df.NOTICEDATE.apply(
lambda x: c.css(em_code, "TOTALSHARE", "EndDate=" + x + ",ispandas=1").TOTALSHARE[0])
        # change ratio (per mille): h = change in tradable shares (10k shares) / (post-change total holdings (10k shares) / (post-change share of total equity (%) / 100))
df['CHANGENUM'] = df['CHANGENUM'] * 10000
        df['BDHCGZS'] = df['BDHCGZS'] * 10000  # post-change total shareholding
df['change_pct'] = abs(df['CHANGENUM'] / df['TOTALSHARE']).astype(float) * 1000
df['change_pct'] = df['change_pct'].round(5)
if pd_is_not_null(df):
df.reset_index(drop=True, inplace=True)
df.rename(columns=self.data_schema.get_data_map(self), inplace=True)
df['entity_id'] = entity.id
df['timestamp'] = pd.to_datetime(df.holder_end_date)
df['provider'] = 'emquantapi'
df['code'] = entity.code
def generate_id(se):
return "{}_{}_{}".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY), se.name)
df_res = pd.concat([i.reset_index(drop=True) for i in dict(list(df.groupby('timestamp'))).values()])
df_res.index+=1
df_res['id'] = df_res[['entity_id', 'timestamp']].apply(generate_id, axis=1)
df_to_db(df=df_res, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
return None
__all__ = ['HolderTradingRecorder']
if __name__ == '__main__':
    # SSE 50
HolderTradingRecorder(codes=['000002'],sleeping_time=0.1).run()
# JqChinaEtfValuationRecorder(codes=['512290']).run()
|
the-stack_0_26933
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example is inspired by https://github.com/jason71995/Keras-GAN-Library,
# https://github.com/kazizzad/DCGAN-Gluon-MxNet/blob/master/MxnetDCGAN.ipynb
# https://github.com/apache/incubator-mxnet/blob/master/example/gluon/dc_gan/dcgan.py
import math
import numpy as np
import imageio
def save_image(data, epoch, image_size, batch_size, output_dir, padding=2):
""" save image """
data = data.asnumpy().transpose((0, 2, 3, 1))
datanp = np.clip(
(data - np.min(data))*(255.0/(np.max(data) - np.min(data))), 0, 255).astype(np.uint8)
x_dim = min(8, batch_size)
y_dim = int(math.ceil(float(batch_size) / x_dim))
height, width = int(image_size + padding), int(image_size + padding)
grid = np.zeros((height * y_dim + 1 + padding // 2, width *
x_dim + 1 + padding // 2, 3), dtype=np.uint8)
k = 0
for y in range(y_dim):
for x in range(x_dim):
if k >= batch_size:
break
start_y = y * height + 1 + padding // 2
end_y = start_y + height - padding
start_x = x * width + 1 + padding // 2
end_x = start_x + width - padding
np.copyto(grid[start_y:end_y, start_x:end_x, :], datanp[k])
k += 1
imageio.imwrite(
'{}/fake_samples_epoch_{}.png'.format(output_dir, epoch), grid)
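# Minimal usage sketch (added for illustration, not part of the original file);
# it assumes MXNet is available and that `fake` is an NDArray of shape
# (batch, 3, H, W), e.g. the output of a DCGAN generator:
#
#   import mxnet as mx
#   fake = mx.nd.random.uniform(shape=(16, 3, 64, 64))
#   save_image(fake, epoch=0, image_size=64, batch_size=16, output_dir='.')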
|
the-stack_0_26934
|
import base64
import binascii
import logging
import re
from Crypto.Cipher import AES
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import parse_json, update_scheme
from streamlink.utils.crypto import unpad_pkcs5
log = logging.getLogger(__name__)
class WebTV(Plugin):
_url_re = re.compile(r"http(?:s)?://(\w+)\.web.tv/?")
_sources_re = re.compile(r'"sources": (\[.*?\]),', re.DOTALL)
_sources_schema = validate.Schema([
{
"src": validate.any(
validate.contains("m3u8"),
validate.all(
validate.text,
validate.transform(lambda x: WebTV.decrypt_stream_url(x)),
validate.contains("m3u8")
)
),
"type": validate.text,
"label": validate.text
}
])
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
@staticmethod
def decrypt_stream_url(encoded_url):
data = base64.b64decode(encoded_url)
cipher_text = binascii.unhexlify(data[96:])
decryptor = AES.new(binascii.unhexlify(data[32:96]),
AES.MODE_CBC,
binascii.unhexlify(data[:32]))
return unpad_pkcs5(decryptor.decrypt(cipher_text)).decode("utf8")
def _get_streams(self):
"""
Find the streams for web.tv
:return:
"""
headers = {}
res = self.session.http.get(self.url, headers=headers)
headers["Referer"] = self.url
sources = self._sources_re.findall(res.text)
if len(sources):
sdata = parse_json(sources[0], schema=self._sources_schema)
for source in sdata:
log.debug(f"Found stream of type: {source['type']}")
if source["type"] == "application/vnd.apple.mpegurl":
url = update_scheme(self.url, source["src"])
try:
# try to parse the stream as a variant playlist
variant = HLSStream.parse_variant_playlist(self.session, url, headers=headers)
if variant:
yield from variant.items()
else:
# and if that fails, try it as a plain HLS stream
yield 'live', HLSStream(self.session, url, headers=headers)
except OSError:
log.warning("Could not open the stream, perhaps the channel is offline")
__plugin__ = WebTV
|
the-stack_0_26935
|
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('/root/PycharmProjects/tf/data',one_hot=True)
lr=tf.Variable(1.0)
batch_size=tf.placeholder(tf.int32,[])
keep_prob=tf.placeholder(tf.float32,[])
input_size=num_step=28
hidden_size=256
layer_num=2
class_num=10
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,class_num])
X=tf.reshape(x, [-1, num_step, input_size])
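# Each 28x28 MNIST image is treated as a sequence of num_step=28 time steps,
# where every step feeds a 28-dimensional row of pixels into the stacked LSTM.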
def get_lstm_cell():
lstmcell=rnn.BasicLSTMCell(hidden_size,reuse=tf.get_variable_scope().reuse)
return rnn.DropoutWrapper(lstmcell,input_keep_prob=keep_prob,output_keep_prob=keep_prob)
rnncell=rnn.MultiRNNCell([get_lstm_cell() for _ in range(layer_num)])
init_state=rnncell.zero_state(batch_size,dtype=tf.float32)
inputs=tf.unstack(X,axis=1)
# with tf.variable_scope('rnn',reuse=tf.AUTO_REUSE) as scope:
outputs,state=tf.nn.static_rnn(rnncell,inputs,initial_state=init_state)
# output=tf.reshape(tf.concat(outputs,1),[-1,num_step,hidden_size])
# h_state=output[:,-1,:]
h_state=outputs[-1]
w=tf.Variable(tf.truncated_normal([hidden_size,class_num],stddev=0.1),dtype=tf.float32)
b=tf.Variable(tf.constant(0.1,shape=[class_num]),dtype=tf.float32)
pre=tf.nn.softmax(tf.nn.xw_plus_b(h_state,w,b))
# loss=tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=y,logits=pre))
loss=-tf.reduce_mean(tf.multiply(y,tf.log(pre)))
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),10)
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op=optimizer.apply_gradients(zip(grads,tvars),global_step=tf.train.get_or_create_global_step())
# train_op=tf.train.AdamOptimizer(0.001).minimize(loss)
correct=tf.equal(tf.argmax(pre,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct,tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(2000):
_batch_size = 128
batch = mnist.train.next_batch(_batch_size)
if (i + 1) % 200 == 0:
train_accuracy = sess.run(accuracy, feed_dict={
x: batch[0], y: batch[1], keep_prob: 1.0, batch_size: _batch_size})
print("Iter%d, step %d, training accuracy %g" % (mnist.train.epochs_completed, (i + 1), train_accuracy))
sess.run(train_op, feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5, batch_size: _batch_size})
testdata=mnist.test.next_batch(500)
print("test accuracy %g" % sess.run(accuracy, feed_dict={
x: testdata[0], y: testdata[1], keep_prob: 1.0, batch_size:500}))
|
the-stack_0_26936
|
"""
Test the file templates with some logic inside them
"""
from pathlib import Path
from projects_boilerplate import file_templates
from projects_boilerplate.structs import License
def test_manifest_template():
template = file_templates.ManifestTemplate()
assert template.file_name == 'MANIFEST.in'
def test_license_apache(monkeypatch):
monkeypatch.setattr(file_templates.LicenseTemplate, 'licenses_location', Path('tests/location'))
template = file_templates.LicenseApacheTemplate()
assert template.template_location == Path('tests/location/license_apache.tpl')
def test_license_mit(monkeypatch):
monkeypatch.setattr(file_templates.LicenseTemplate, 'licenses_location', Path('tests/location'))
template = file_templates.LicenseMitTemplate()
assert template.template_location == Path('tests/location/license_mit.tpl')
def test_license_gpl(monkeypatch):
monkeypatch.setattr(file_templates.LicenseTemplate, 'licenses_location', Path('tests/location'))
template = file_templates.LicenseGplTemplate()
assert template.template_location == Path('tests/location/license_gpl.tpl')
def test_pytest_template(tmp_path):
template = file_templates.PytestTemplate('project_test')
template.build_template(tmp_path)
assert (tmp_path / 'pytest.ini').read_text() == """\
[pytest]
testpaths = tests/ project_test/
cache_dir = .cache
mccabe-complexity = 10
log_format =
%(filename)s:%(lineno)d: [%(name)s:%(levelname)s] %(asctime)s: %(message)s
log_date_format = %Y-%m-%d %H:%M:%S
addopts =
--cov-config coverage.ini
--cov-report term
--cov-report html:coverage/html
--cov project_test/
--pycodestyle
--isort
--mccabe
--mypy
--pylint --pylint-rcfile pylint.ini
--verbose\
"""
def test_sample_test_template(tmp_path):
template = file_templates.SampleTestTemplate('project_test')
template.build_template(tmp_path)
assert (tmp_path / 'test_sample.py').read_text() == """\
\"\"\"
This is a test for the sample file
\"\"\"
from project_test import sample
def test_sample():
assert sample.sample_method(2) == 4
"""
def test_setup_py_template(tmp_path):
template = file_templates.SetupPyTemplate('project-test', License.MIT, 'project_test')
template.build_template(tmp_path)
assert (tmp_path / 'setup.py').read_text() == """\
import sys
from setuptools import find_packages, setup
from pathlib import Path
with open(str(Path(".") / "README.md"), "r", encoding="utf-8") as f:
README = f.read()
setup(
name="project-test",
version="0.0.0",
license="MIT",
url="url_to_your_project",
description="Add your description here",
long_description=README,
long_description_content_type="text/markdown",
author="Add your name here",
author_email="Add your email here",
packages=find_packages(exclude=["tests*"]),
test_suite="tests",
extras_require={
"dev": [
"pylint"
],
"test": [
"pytest",
"pytest-cov",
"pytest-pycodestyle",
"pytest-isort",
"pytest-mccabe",
"pytest-mypy",
"pytest-pylint",
"tox"
]
},
python_requires=">=3.6",
entry_points={
"console_scripts": [
"project-test=project_test.main:main",
]
},
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
"""
def test_python_dockerfile_template(tmp_path):
template = file_templates.PythonDockerfileTemplate('project-test', 'project_test')
template.build_template(tmp_path)
assert (tmp_path / 'Dockerfile').read_text() == """\
FROM python:3.7-alpine
LABEL maintainer="your email here"
LABEL description="You description here"
LABEL version="0.0.0"
WORKDIR /usr/app
COPY setup.py README.md MANIFEST.in ./
COPY project_test ./project_test
RUN pip install .
RUN mkdir /files
VOLUME /files
ENTRYPOINT [ "project-test" ]
CMD []
"""
def test_flask_dockerfile_template(tmp_path):
template = file_templates.FlaskDockerfileTemplate('project_test')
template.build_template(tmp_path)
assert (tmp_path / 'Dockerfile').read_text() == """\
FROM python:3.7-alpine
LABEL maintainer="your email here"
LABEL description="You description here"
LABEL version="0.0.0"
WORKDIR /usr/app
COPY app.py README.md requirements.txt ./
COPY project_test ./project_test
RUN pip install gunicorn
RUN pip install -r requirements.txt
RUN mkdir /config
VOLUME /config
ENTRYPOINT [ "gunicorn" ]
CMD ["wsgi:app"]
"""
def test_flask_app_template(tmp_path):
template = file_templates.FlaskAppTemplate('project_test')
template.build_template(tmp_path)
assert (tmp_path / 'app.py').read_text() == """\
from project_test import create_app
if __name__ == '__main__':
app = create_app(debug=True)
app.run(port=5000, host='0.0.0.0')
"""
def test_flask_wsgi_template(tmp_path):
template = file_templates.WsgiTemplate('project_test')
template.build_template(tmp_path)
assert (tmp_path / 'wsgi.py').read_text() == """\
from project_test import create_app
app = create_app(debug=False)
"""
def test_flask_init_template(tmp_path):
template = file_templates.FlaskInitTemplate('toto')
(tmp_path / 'toto').mkdir()
template.build_template(tmp_path)
assert (tmp_path / 'toto/__init__.py').read_text() == """\
\"\"\"
Main package for the application
\"\"\"
from flask import Flask
from .views import MAIN_VIEWS
def create_app(debug=False):
app = Flask(__name__)
app.debug = debug
app.register_blueprint(MAIN_VIEWS)
return app
"""
def test_flask_requirements_template(tmp_path):
template = file_templates.FlaskRequirementsTemplate()
template.build_template(tmp_path)
assert (tmp_path / 'requirements.txt').read_text() == """\
Flask
"""
def test_flask_test_requirements_template(tmp_path):
template = file_templates.FlaskTestRequirementsTemplate()
template.build_template(tmp_path)
assert (tmp_path / 'test-requirements.txt').read_text() == """\
pytest
pylint
pytest-cov
pytest-pycodestyle
pytest-isort
pytest-mccabe
pytest-mypy
pytest-pylint
pytest-flask
"""
def test_flask_test_app_template(tmp_path):
template = file_templates.FlaskTestAppTemplate('project_test', 'tests')
(tmp_path / 'tests').mkdir()
template.build_template(tmp_path)
assert (tmp_path / 'tests/test_app.py').read_text() == """\
\"\"\"
Test the base application
\"\"\"
import pytest
from project_test import create_app
@pytest.fixture(name='app')
def fixture_app():
app = create_app()
return app
def test_app(client):
response = client.get('/')
assert response.status_code == 200
assert "Flask Dockerized" in str(response.data)
"""
def test_python_readme_template(tmp_path):
template = file_templates.PythonReadmeTemplate("project-test")
template.build_template(tmp_path)
assert (tmp_path / 'README.md').read_text() == """\
# project-test
Project automatically generated with [projects boilerplate](https://github.com/aHugues/projects-boilerplate)
## Install and run the project
```
pip install .
```
```
project-test
```
## Run the tests
```
pip install -r test-requirements.txt
pytest
```
"""
def test_flask_readme_template(tmp_path):
template = file_templates.FlaskReadmeTemplate("project-test")
template.build_template(tmp_path)
assert (tmp_path / 'README.md').read_text() == """\
# project-test
Flask server automatically generated with [projects boilerplate](https://github.com/aHugues/projects-boilerplate)
## Install and run the project
```
pip install .
```
```
python app.py
```
## Run in production
```
pip install gunicorn
gunicorn wsgi:app
```
## Run the tests
```
pip install -r test-requirements.txt
pytest
```
"""
def test_init_template():
template = file_templates.InitTemplate()
assert template.file_name == '__init__.py'
|
the-stack_0_26937
|
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import sys
import time
from threading import Thread
from unittest import TestCase
try:
from unittest import SkipTest
except ImportError:
from unittest2 import SkipTest
from pytest import mark
import zmq
from zmq.utils import jsonapi
try:
import gevent
from zmq import green as gzmq
have_gevent = True
except ImportError:
have_gevent = False
PYPY = 'PyPy' in sys.version
#-----------------------------------------------------------------------------
# skip decorators (directly from unittest)
#-----------------------------------------------------------------------------
_id = lambda x: x
skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy")
require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4")
#-----------------------------------------------------------------------------
# Base test class
#-----------------------------------------------------------------------------
class BaseZMQTestCase(TestCase):
green = False
@property
def Context(self):
if self.green:
return gzmq.Context
else:
return zmq.Context
def socket(self, socket_type):
s = self.context.socket(socket_type)
self.sockets.append(s)
return s
def setUp(self):
super(BaseZMQTestCase, self).setUp()
if self.green and not have_gevent:
raise SkipTest("requires gevent")
self.context = self.Context.instance()
self.sockets = []
def tearDown(self):
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close(0)
for ctx in contexts:
t = Thread(target=ctx.term)
t.daemon = True
t.start()
t.join(timeout=2)
if t.is_alive():
# reset Context.instance, so the failure to term doesn't corrupt subsequent tests
zmq.sugar.context.Context._instance = None
raise RuntimeError("context could not terminate, open sockets likely remain in test")
super(BaseZMQTestCase, self).tearDown()
def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
"""Create a bound socket pair using a random port."""
s1 = self.context.socket(type1)
s1.setsockopt(zmq.LINGER, 0)
port = s1.bind_to_random_port(interface)
s2 = self.context.socket(type2)
s2.setsockopt(zmq.LINGER, 0)
s2.connect('%s:%s' % (interface, port))
self.sockets.extend([s1,s2])
return s1, s2
def ping_pong(self, s1, s2, msg):
s1.send(msg)
msg2 = s2.recv()
s2.send(msg2)
msg3 = s1.recv()
return msg3
def ping_pong_json(self, s1, s2, o):
if jsonapi.jsonmod is None:
raise SkipTest("No json library")
s1.send_json(o)
o2 = s2.recv_json()
s2.send_json(o2)
o3 = s1.recv_json()
return o3
def ping_pong_pyobj(self, s1, s2, o):
s1.send_pyobj(o)
o2 = s2.recv_pyobj()
s2.send_pyobj(o2)
o3 = s1.recv_pyobj()
return o3
def assertRaisesErrno(self, errno, func, *args, **kwargs):
try:
func(*args, **kwargs)
except zmq.ZMQError as e:
self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
else:
self.fail("Function did not raise any error")
def _select_recv(self, multipart, socket, **kwargs):
"""call recv[_multipart] in a way that raises if there is nothing to receive"""
if zmq.zmq_version_info() >= (3,1,0):
# zmq 3.1 has a bug, where poll can return false positives,
# so we wait a little bit just in case
# See LIBZMQ-280 on JIRA
time.sleep(0.1)
r,w,x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5))
assert len(r) > 0, "Should have received a message"
kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
recv = socket.recv_multipart if multipart else socket.recv
return recv(**kwargs)
def recv(self, socket, **kwargs):
"""call recv in a way that raises if there is nothing to receive"""
return self._select_recv(False, socket, **kwargs)
def recv_multipart(self, socket, **kwargs):
"""call recv_multipart in a way that raises if there is nothing to receive"""
return self._select_recv(True, socket, **kwargs)
class PollZMQTestCase(BaseZMQTestCase):
pass
class GreenTest:
"""Mixin for making green versions of test classes"""
green = True
def assertRaisesErrno(self, errno, func, *args, **kwargs):
if errno == zmq.EAGAIN:
raise SkipTest("Skipping because we're green.")
try:
func(*args, **kwargs)
except zmq.ZMQError:
e = sys.exc_info()[1]
self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
else:
self.fail("Function did not raise any error")
def tearDown(self):
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close()
try:
gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True)
except gevent.Timeout:
raise RuntimeError("context could not terminate, open sockets likely remain in test")
def skip_green(self):
raise SkipTest("Skipping because we are green")
def skip_green(f):
def skipping_test(self, *args, **kwargs):
if self.green:
raise SkipTest("Skipping because we are green")
else:
return f(self, *args, **kwargs)
return skipping_test
|
the-stack_0_26940
|
import re
import datetime
from lndmanage.lib.network_info import NetworkAnalysis
from lndmanage.lib import ln_utilities
from lndmanage import settings
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# width of a column in the output in characters
COL_WIDTH = 66
def padded_column_string(string1, string2, unit, shift=10, max_len_string1=25):
"""
    padded_column_string is a helper function that returns a formatted string
    representing an object, its quantity, and its unit.
:param string1: information
:type string1: str
:param string2: quantity
:type string2: str
:param unit: unit of the information
:type unit: str
:param shift: whitespace shift to the right (indentation)
:type shift: int
:param max_len_string1: needed to calculate padding to the right
:type max_len_string1: int
:return:
:rtype:
"""
string = f" " * shift + f"{string1:<{max_len_string1}} {string2} {unit}"
if type(string2) == float:
string = f" " * shift + f"{string1:<{max_len_string1}} " \
f"{string2:0.6f} {unit}"
return string
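# Usage note (added for clarity), e.g.:
#   padded_column_string('capacity:', 100000, 'sat')
# indents by `shift` spaces, left-justifies 'capacity:' in a field of
# max_len_string1 characters and appends '100000 sat'; float quantities are
# rendered with six decimal places.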
class Info(object):
"""
Implements the info command, which displays info on individual channels and
nodes.
"""
def __init__(self, node):
"""
:param node: node object
:type node: lndmanage.lib.node.LndNode
"""
self.node = node
self.network_info = NetworkAnalysis(self.node)
def parse_and_print(self, info):
"""
Parses an info string for a channel id or node public key and prints
out the information gathered about the object.
:param info: channel id or node public key
:type info: str
"""
# analyzer = NetworkAnalysis(self.node)
try:
channel_id, node_pub_key = self.parse(info)
except ValueError:
logger.info("Info didn't represent neither a channel nor a node.")
return
# Info was a channel.
if channel_id is not None:
try:
general_info = self.node.network.edges[channel_id]
except KeyError:
logger.info("Channel id %s is not known in the public graph.",
channel_id)
return
# Add some more information on the channel.
general_info['node1_alias'] = \
self.node.network.node_alias(general_info['node1_pub'])
general_info['node2_alias'] = \
self.node.network.node_alias(general_info['node2_pub'])
general_info['blockheight'] = \
ln_utilities.convert_channel_id_to_short_channel_id(
channel_id)[0]
general_info['open_timestamp'] = ln_utilities.height_to_timestamp(
self.node, general_info['blockheight'])
# TODO: if it's our channel, add extra info
extra_info = None
self.print_channel_info(general_info)
# Info was a node.
else:
try:
general_info = self.network_info.node_info_basic(node_pub_key)
except KeyError:
return
# TODO: if it's a (channel) peer or our node, add extra info
extra_info = None
self.print_node_info(general_info)
def parse(self, info):
"""
Parse whether info contains a channel id or node public key and hand
it back. If no info could be extracted, raise a ValueError.
:param info:
:type info: str
:return: channel_id, node_pub_key
:rtype: int, str
"""
exp_channel_id = re.compile("^[0-9]{18}$")
exp_short_channel_id = re.compile("^[0-9]{6}x[0-9]{3}x[0-9]$")
exp_chan_point = re.compile("^[a-z0-9]{64}:[0-9]$")
exp_node_id = re.compile("^[a-z0-9]{66}$")
channel_id = None
node_pub_key = None
# prepare input string info
if exp_channel_id.match(info) is not None:
logger.debug("Info represents channel id.")
channel_id = int(info)
elif exp_short_channel_id.match(info) is not None:
logger.debug("Info represents short channel id.")
# TODO: convert short channel id to channel id
channel_id = 0
elif exp_chan_point.match(info) is not None:
# TODO: convert chan point to channel id
logger.debug("Info represents short channel id.")
channel_id = 0
elif exp_node_id.match(info) is not None:
logger.debug("Info represents node public key.")
node_pub_key = info
else:
raise ValueError("Info string doesn't match any pattern.")
return channel_id, node_pub_key
def print_channel_info(self, general_info):
"""
Prints the channel info with peer information.
:param general_info: information about the channel in the public graph
:type general_info: dict
"""
logger.info("-------- Channel info --------")
logger.info(f"channel id: {general_info['channel_id']} "
f"channel point: {general_info['chan_point']}")
        # capacity
string = padded_column_string(
'capacity:', general_info['capacity'], 'sat')
logger.info(f"{string:{COL_WIDTH*2}}")
# blockheight
string = padded_column_string(
'blockheight:', general_info['blockheight'], '')
logger.info(f"{string:{COL_WIDTH*2}}")
# opening time
time = datetime.datetime.utcfromtimestamp(
general_info['open_timestamp']).strftime('%Y-%m-%d %H:%M:%S')
string = padded_column_string('open since:', time, '')
logger.info(f"{string:{COL_WIDTH*2}}")
# channel age
age = round(
(self.node.blockheight - general_info['blockheight']) / 6 / 24, 2)
string = padded_column_string('channel age:', age, 'days')
logger.info(f"{string:{COL_WIDTH*2}}")
# last update
last_update = general_info['last_update']
last_update_time = datetime.datetime.utcfromtimestamp(
last_update).strftime('%Y-%m-%d %H:%M:%S')
string = padded_column_string('last update:', last_update_time, '')
logger.info(f"{string:{COL_WIDTH*2}}")
logger.info("")
# channel partner overview
logger.info("-------- Channel partners --------")
logger.info(f"{general_info['node1_pub']:{COL_WIDTH}} | "
f"{general_info['node2_pub']:{COL_WIDTH}}")
logger.info(f"{general_info['node1_alias']:^{COL_WIDTH}} | "
f"{general_info['node2_alias']:^{COL_WIDTH}}")
np1 = general_info['node1_policy']
np2 = general_info['node2_policy']
last_update_1 = np1['last_update']
last_update_2 = np2['last_update']
last_update_time_1 = datetime.datetime.utcfromtimestamp(
last_update_1).strftime('%Y-%m-%d %H:%M:%S')
last_update_time_2 = datetime.datetime.utcfromtimestamp(
last_update_2).strftime('%Y-%m-%d %H:%M:%S')
# base fee
string_left = padded_column_string(
'base fee:', np1['fee_base_msat'], 'msat')
string_right = padded_column_string(
'base fee:', np2['fee_base_msat'], 'msat')
logger.info(
f"{string_left:{COL_WIDTH}} | {string_right:{COL_WIDTH}}")
# fee rate
string_left = padded_column_string(
'fee rate:', np1['fee_rate_milli_msat'] / 1E6, 'sat/sat')
string_right = padded_column_string(
'fee rate:', np2['fee_rate_milli_msat'] / 1E6, 'sat/sat')
logger.info(
f"{string_left:{COL_WIDTH}} | {string_right:{COL_WIDTH}}")
# time lock delta
string_left = padded_column_string(
'time lock delta:', np1['time_lock_delta'], 'blocks')
string_right = padded_column_string(
'time lock delta:', np2['time_lock_delta'], 'blocks')
logger.info(
f"{string_left:{COL_WIDTH}} | {string_right:{COL_WIDTH}}")
# disabled
string_left = padded_column_string('disabled:', np1['disabled'], '')
string_right = padded_column_string('disabled:', np2['disabled'], '')
logger.info(
f"{string_left:{COL_WIDTH}} | {string_right:{COL_WIDTH}}")
# last update
string_left = padded_column_string(
'last update:', last_update_time_1, '')
string_right = padded_column_string(
'last update:', last_update_time_2, '')
logger.info(
f"{string_left:{COL_WIDTH}} | {string_right:{COL_WIDTH}}")
def print_node_info(self, general_info):
"""
Prints the node info.
:param general_info: information about the node in the public graph
:type general_info: dict
"""
logger.info("-------- Node info --------")
logger.info(general_info['pub_key'])
# alias
string = padded_column_string('alias:', general_info['alias'], '')
logger.info(f"{string:{COL_WIDTH*2}}")
# last update
last_update = general_info['last_update']
last_update_time = datetime.datetime.utcfromtimestamp(
last_update).strftime('%Y-%m-%d %H:%M:%S')
string = padded_column_string('last update:', last_update_time, '')
logger.info(f"{string:{COL_WIDTH*2}}")
        # number of channels
string = padded_column_string(
'number of channels:', general_info['num_channels'], '')
logger.info(f"{string:{COL_WIDTH*2}}")
# total capacity
string = padded_column_string(
'total capacity:', general_info['total_capacity'], 'sat')
logger.info(f"{string:{COL_WIDTH*2}}")
# capacity per channel
string = padded_column_string(
'capacity (median):', general_info['median_capacity'], 'sat')
logger.info(f"{string:{COL_WIDTH*2}}")
string = padded_column_string(
'capacity (mean):', general_info['mean_capacity'], 'sat')
logger.info(f"{string:{COL_WIDTH*2}}")
# fees
string = padded_column_string(
'base fee (median):', general_info['median_base_fee'], 'msat')
logger.info(f"{string:{COL_WIDTH*2}}")
string = padded_column_string(
'base fee (mean):', general_info['mean_base_fee'], 'msat')
logger.info(f"{string:{COL_WIDTH*2}}")
string = padded_column_string(
'fee rate (median):', general_info['median_fee_rate'], 'sat/sat')
logger.info(f"{string:{COL_WIDTH*2}}")
string = padded_column_string(
'fee rate (mean):', general_info['mean_fee_rate'], 'sat/sat')
logger.info(f"{string:{COL_WIDTH*2}}")
# addresses
logger.info("-------- Addresses --------")
for addr in general_info['addresses']:
logger.info(5 * " " + general_info['pub_key'] + "@" + addr)
|
the-stack_0_26941
|
# -*- coding: utf-8 -*-
# pylint: skip-file
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../hepdata_lib'))
if (sys.version_info > (3, 3)):
    # Python 3.3+ (unittest.mock is in the standard library)
from unittest.mock import MagicMock
else:
# Python 2 and < 3.3
from mock import Mock as MagicMock
class Mock(MagicMock):
"""Mocking class for missing packages."""
@classmethod
def __getattr__(cls, name):
return MagicMock()
try:
import ROOT # pylint: disable=W0611
except ImportError:
MOCK_MODULES = ['ROOT']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Project information -----------------------------------------------------
project = 'hepdata_lib'
copyright = '2018-2020, Andreas Albert, Clemens Lange'
author = 'Andreas Albert, Clemens Lange'
# The short X.Y version
version = '0.8.1'
# The full version, including alpha/beta/rc tags
release = '0.8.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.githubpages',
'm2r',
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'hepdata_libdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hepdata_lib.tex', 'hepdata\\_lib Documentation',
'Andreas Albert, Clemens Lange', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hepdata_lib', 'hepdata_lib Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hepdata_lib', 'hepdata_lib Documentation',
author, 'hepdata_lib', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
autodoc_mock_imports = ["ROOT"]
|
the-stack_0_26942
|
import sqlite3
import discord
import re
from discord.ext import commands
from discord.utils import find
import secrets
from bank import Bank
from bank import Meme
bank = Bank()
memedb = Meme()
i_string = ('^(?:http:\/\/|https:\/\/).*\.?(?:imgur.com|redd.i'
't)\/[^\ ]*(?:.gif|.gifv|.png|.jpg|.jpeg|.mp4|.apng|.tiff)$')
v_string = ('^(?:http:\/\/|https:\/\/).*\.?(?:gfycat.com|streamable.com'
'|youtu.be|youtube.com|twitch.tv)\/[^\ ]*')
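# Illustrative note (added): i_string matches direct media file links such as
# https://imgur.com/example.gif, while v_string matches video page links such
# as https://youtu.be/example or https://gfycat.com/example.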
memebuck = '[̲̅$̲̅(̲̅ ͡° ͜ʖ ͡°̲̅)̲̅$̲'
THUMBS_UP = "\U0001F44D"
THUMBS_DOWN = "\U0001F44E"
def account_exists(ctx):
return bank.check_if_exists(ctx.message.author.id)
def is_admin(ctx):
admin_roles = secrets.ADMIN_ROLES
user_roles = ctx.message.author.roles
for role in user_roles:
if role.name in admin_roles:
return True
return False
class Memes:
def __init__(self, yeebot):
self.yeebot = yeebot
self.conn = sqlite3.connect('db/yee.db')
self.cur = self.conn.cursor()
self.image_link = re.compile(i_string)
self.video_link = re.compile(v_string)
@commands.check(account_exists)
@commands.group(pass_context=True, description='Return a random meme. Cost: 1 memebuck.')
async def meme(self, ctx):
if ctx.invoked_subcommand is None:
if bank.check_balance(ctx.message.author.id) > 1:
bank.withdraw(ctx.message.author.id, 1)
returned_meme = memedb.retrieve(user=ctx.message.author)
return await self.yeebot.say('{} Please enjoy this delicious meme brought to you by {}'.format(returned_meme[0], returned_meme[1]))
else:
return await self.yeebot.say("You don't have enough memebucks to do that.")
@commands.check(account_exists)
@meme.command(pass_context=True, description="Submit a meme to be reviewed and potentially added to the database.")
async def add(self, ctx, link):
if link:
v_link = self.video_link.match(link)
i_link = self.image_link.match(link)
vote_count = 0
neg_vote = 0
pos_vote = 0
if v_link or i_link:
if memedb.retrieve(link=link):
await self.yeebot.delete_message(ctx.message)
return await self.yeebot.say("Sorry, that link is already in the database.")
else:
# Add the link to the database, status = review
memedb.add(ctx.message.author, link)
# delete the submission message
await self.yeebot.delete_message(ctx.message)
# post the image for voting
msg = await self.yeebot.send_message(ctx.message.channel, 'Please vote on the following meme: {}'.format(link))
# add thumbs up and thumbs down to the image
await self.yeebot.add_reaction(msg, THUMBS_UP)
await self.yeebot.add_reaction(msg, THUMBS_DOWN)
# wait for votes on the image
while vote_count < secrets.VOTES_TO_COMPLETE:
reaction = await self.yeebot.wait_for_reaction(message=msg, emoji=[THUMBS_UP, THUMBS_DOWN])
print('Reaction added for {}'.format(link))
if reaction.reaction.emoji == THUMBS_UP and reaction.user != msg.author:
self.cur.execute("SELECT vote FROM votes WHERE voter_id = ? AND link = ?;", (reaction.user.id, link))
row = self.cur.fetchone()
if row:
user_vote = row[0]
if user_vote:
if user_vote == 'NEG':
print('User already has an active vote. Removing emoji and updating row.')
await self.yeebot.remove_reaction(msg, THUMBS_DOWN, reaction.user)
self.cur.execute("UPDATE votes SET vote = 'POS' WHERE voter_id = ? AND link = ?;", (reaction.user.id, link))
self.conn.commit()
pos_vote += 1
neg_vote -= 1
print('Positive vote: {}'.format(pos_vote))
print('Negative vote: {}'.format(neg_vote))
else:
print('User already made a positive vote. Do nothing.')
else:
pos_vote += 1
vote_count += 1
self.cur.execute("INSERT INTO votes (link, voter_id, vote) VALUES(?, ?, 'POS');", (link, reaction.user.id))
self.conn.commit()
print('Positive vote: {}'.format(pos_vote))
print('Negative vote: {}'.format(neg_vote))
elif reaction.reaction.emoji == THUMBS_DOWN and reaction.user != msg.author:
self.cur.execute("SELECT vote FROM votes WHERE voter_id = ? AND link = ?;", (reaction.user.id, link))
row = self.cur.fetchone()
if row:
user_vote = row[0]
if user_vote:
if user_vote == 'POS':
print('User has an active vote. Removing emoji and updating row')
await self.yeebot.remove_reaction(msg, THUMBS_UP, reaction.user)
self.cur.execute("UPDATE votes SET vote = 'NEG' WHERE voter_id = ? AND link = ?;", (reaction.user.id, link))
self.conn.commit()
pos_vote -= 1
neg_vote += 1
print('Positive vote: {}'.format(pos_vote))
print('Negative vote: {}'.format(neg_vote))
else:
print('User already made a negative vote. Do nothing.')
else:
neg_vote += 1
vote_count += 1
self.cur.execute("INSERT INTO votes (link, voter_id, vote) VALUES(?, ?, 'NEG');", (link, reaction.user.id))
self.conn.commit()
print('Positive vote: {}'.format(pos_vote))
print('Negative vote: {}'.format(neg_vote))
print('{} vote over'.format(link))
if pos_vote > neg_vote:
memedb.approve(link)
bank.deposit(ctx.message.author.id, 10)
await self.yeebot.delete_message(msg)
return await self.yeebot.say("{}'s link `{}` has been approved.".format(ctx.message.author.mention, link))
elif neg_vote > pos_vote:
memedb.reject(link)
await self.yeebot.delete_message(msg)
return await self.yeebot.say("{}'s link `{}` has been rejected.".format(ctx.message.author.mention, link))
else:
await self.yeebot.delete_message(ctx.message)
return await self.yeebot.say('Please only submit links from Youtube, GfyCat, Streamable, Twitch, Imgur, and Reddit. Only direct image links are accepted. Regular video links are ok.')
@commands.check(is_admin)
@meme.command(pass_context=True, hidden=True)
async def reject(self, ctx, link):
if link:
memedb.reject(link)
await self.yeebot.delete_message(ctx.message)
return await self.yeebot.say('<{}> has been rejected.'.format(link))
else:
return await self.yeebot.say('Reject what?')
@commands.check(is_admin)
@meme.command(pass_context=True, hidden=True)
    async def approve(self, ctx, link):
if link:
memedb.approve(link)
await self.yeebot.delete_message(ctx.message)
return await self.yeebot.say('<{}> has been approved.'.format(link))
else:
return await self.yeebot.say('Approve what?')
def setup(yeebot):
yeebot.add_cog(Memes(yeebot))
|
the-stack_0_26943
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 14 16:33:19 2019
@author: Zymieth
"""
from operator import itemgetter
with open('APSP1.txt') as file:
x1 = [row[:-1] for row in file]
x1 = [[int(r) for r in row.split(" ")] for row in x1]
with open('APSP2.txt') as file:
x2 = [row[:-1] for row in file]
x2 = [[int(r) for r in row.split(" ")] for row in x2]
with open('APSP3.txt') as file:
x3 = [row[:-1] for row in file]
x3 = [[int(r) for r in row.split(" ")] for row in x3]
def adj_listify(g):
'''
Returns the adjacency list representation of a directed graph G from a naive
list of the form [tail, head, edge_cost]
'''
graph = []
sol = []
g = sorted(g, key=itemgetter(0))
curr = g[0][0]
sol.append([curr])
for idx, row in enumerate(g):
if curr == row[0]:
sol.append([row[1], row[2]])
else:
graph.append(sol)
sol = []
curr = row[0]
sol.append([curr])
sol.append([row[1], row[2]])
# final remaining sublist
graph.append(sol)
return graph
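# Illustrative note (added, not in the original script): for a tiny directed
# graph given as [[1, 2, 5], [1, 3, 2], [2, 3, 1]], adj_listify returns
# [[[1], [2, 5], [3, 2]], [[2], [3, 1]]], i.e. each sublist starts with [tail]
# followed by [head, cost] pairs.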
def adj_listify_target(g):
'''
Returns the adjacency list representation of a directed graph G from a naive
list of the form [tail, head, edge_cost]
where the first element of each sublist is the head of an edge
'''
graph = []
sol = []
g = sorted(g, key=itemgetter(1))
curr = g[0][1]
sol.append([curr])
for idx, row in enumerate(g):
if curr == row[1]:
sol.append([row[0], row[2]])
else:
graph.append(sol)
sol = []
curr = row[1]
sol.append([curr])
sol.append([row[0], row[2]])
# final remaining sublist
graph.append(sol)
return graph
class Graph(object):
def __init__(self, adj_list, adj_list_targets):
self.nodes = [n[0][0] for n in adj_list]
self.edges = [n for n in adj_list]
self.heads = [n for n in adj_list_targets]
self.visited = []
def augment(self):
'''
augments graph with artificial node with 0 edge connections
to all other vertices
'''
new = len(self.nodes)+1
self.nodes.append(new)
for row in self.heads:
row.append([new, 0])
    def restoreOriginal(self):
'''
restores the original G from the augmented version
'''
def dijkstra(self, start):
A = {}
node = start
# initialization of hash
for n in range(1,len(self.nodes)+1):
A[n] = float('inf')
A[node] = 0
while self.visited != self.nodes:
if len(self.visited) == len(self.nodes) - 1:
self.visited.append(node)
break
self.visited.append(node)
opt = float('inf')
mcost = float('inf')
for n in self.visited:
candidates = [e for e in self.edges[n-1][1:]
if e[0] not in self.visited]
if len(candidates) != 0:
for c in candidates:
nxt, cost = c
# check optimality of edge selection
if A[n]+cost < A[n]+mcost and A[n]+cost < opt:
opt = A[n]+cost
mnxt, mcost, origin = nxt, cost, n
A[mnxt] = A[origin] + mcost
node = mnxt
# reset visited
self.visited = []
return A
def bellmanFord(self, start):
A_prev, A = {}, {}
for n in range(1,len(self.nodes)+1):
A_prev[n] = float('inf')
A_prev[1] = 0
# edge budget
for i in range(1, len(self.nodes)+1):
# destination vertex
for j in range(1, len(self.nodes)+1):
# bruteforce search to find min edge going into vertex j
candidate = min(self.heads[j-1][1:],\
key = lambda x: x[1] + A_prev[x[0]])
source, cost = candidate[0], candidate[1]
A[j] = min(A_prev[j], A_prev[source] + cost)
A_prev, A = A, {}
return A_prev
# test on small undirected graph
sg = [[1, 2, 20], [1, 4, 30],
[2, 1, 20], [2, 3, 20],
[3, 2, 30], [3, 4, 20],
[4, 1, 30], [4, 3, 20]]
sg1 = adj_listify(sg)
sg2 = adj_listify_target(sg)
sG = Graph(sg1, sg2)
a1 = sG.dijkstra(1)
a2 = sG.bellmanFord(1)
# x1 = adj_listify(x1)
# x2 = adj_listify(x2)
# x3 = adj_listify(x3)
#
# g1 = Graph(x1)
# =============================================================================
|
the-stack_0_26944
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.purview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationList"]
"""Lists the available operations.
List of available operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.purview.models.OperationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseModel, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Purview/operations'} # type: ignore
|
the-stack_0_26945
|
#!/usr/bin/env python
"""
#
#
# File Name: scale/specifity.py
# Description:
"""
import numpy as np
import pandas as pd
import scipy as sp
def jsd(p, q, base=np.e):
"""
Jensen Shannon_divergence
"""
## convert to np.array
p, q = np.asarray(p), np.asarray(q)
## normalize p, q to probabilities
p, q = p/p.sum(), q/q.sum()
m = 1./2*(p + q)
return sp.stats.entropy(p,m, base=base)/2. + sp.stats.entropy(q, m, base=base)/2.
def jsd_sp(p, q, base=np.e):
"""
Define specificity score:
score = 1 - sqrt(jsd(p, q))
"""
    return 1 - jsd(p, q, base=base)**0.5
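# Illustrative note (added): with the natural-log base used by default, jsd is
# 0 for identical distributions and ln(2) ~= 0.6931 for disjoint ones, e.g.
#   jsd([1, 0], [0, 1])          ~= 0.6931
#   jsd_sp([1, 2, 3], [1, 2, 3]) == 1.0
# jsd_sp is highest (1.0) when the expression pattern matches the predefined
# cluster pattern exactly and decreases as the two distributions diverge.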
def log2norm(e):
"""
log2(e+1) normalization
"""
loge = np.log2(e+1)
return loge/sum(loge)
def predefined_pattern(t, labels):
q = np.zeros(len(labels))
q[np.where(labels==t)[0]] = 1
return q
def vec_specificity_score(e, t, labels):
"""
Calculate a vector specificity for cluster t
"""
e = log2norm(e)
et = log2norm(predefined_pattern(t, labels))
return jsd_sp(e, et)
def mat_specificity_score(mat, labels):
"""
Calculate all peaks or genes specificity across all clusters
Return:
peaks/genes x clusters dataframe
"""
scores = []
for i in np.unique(labels):
score = mat.apply(lambda x: vec_specificity_score(x, i, labels), axis=1)
scores.append(score)
return pd.concat(scores, axis=1)
def cluster_specific(score_mat, classes=None, top=0):
"""
Identify top specific peaks for each cluster
Input:
score_mat calculated by mat_specificity_score
Return:
specific peaks index and peaks labels
"""
scores = score_mat.max(1)
peak_labels = np.argmax(score_mat.values, axis=1)
inds = []
labels = []
if classes is None:
classes = np.unique(peak_labels)
for i in classes:
index = np.where(peak_labels==i)[0]
ind = np.argsort(scores[index])[-top:]
ind = index[ind]
inds.append(ind)
labels.append(peak_labels[ind])
return np.concatenate(inds), np.concatenate(labels)
|
the-stack_0_26946
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PromotionRelationDTO import PromotionRelationDTO
class AlipayOpenSpIsvRelationQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenSpIsvRelationQueryResponse, self).__init__()
self._current_page = None
self._page_size = None
self._promotion_relations = None
self._total_size = None
@property
def current_page(self):
return self._current_page
@current_page.setter
def current_page(self, value):
self._current_page = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def promotion_relations(self):
return self._promotion_relations
@promotion_relations.setter
def promotion_relations(self, value):
if isinstance(value, list):
self._promotion_relations = list()
for i in value:
if isinstance(i, PromotionRelationDTO):
self._promotion_relations.append(i)
else:
self._promotion_relations.append(PromotionRelationDTO.from_alipay_dict(i))
@property
def total_size(self):
return self._total_size
@total_size.setter
def total_size(self, value):
self._total_size = value
def parse_response_content(self, response_content):
response = super(AlipayOpenSpIsvRelationQueryResponse, self).parse_response_content(response_content)
if 'current_page' in response:
self.current_page = response['current_page']
if 'page_size' in response:
self.page_size = response['page_size']
if 'promotion_relations' in response:
self.promotion_relations = response['promotion_relations']
if 'total_size' in response:
self.total_size = response['total_size']
|
the-stack_0_26947
|
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
import tqdm
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
class SAGEConvWithCV(nn.Module):
def __init__(self, in_feats, out_feats, activation):
super().__init__()
self.W = nn.Linear(in_feats * 2, out_feats)
self.activation = activation
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain('relu')
nn.init.xavier_uniform_(self.W.weight, gain=gain)
nn.init.constant_(self.W.bias, 0)
def forward(self, block, H, HBar=None):
if self.training:
with block.local_scope():
H_src, H_dst = H
HBar_src, agg_HBar_dst = HBar
block.dstdata['agg_hbar'] = agg_HBar_dst
block.srcdata['hdelta'] = H_src - HBar_src
block.update_all(fn.copy_u('hdelta', 'm'), fn.mean('m', 'hdelta_new'))
h_neigh = block.dstdata['agg_hbar'] + block.dstdata['hdelta_new']
h = self.W(th.cat([H_dst, h_neigh], 1))
if self.activation is not None:
h = self.activation(h)
return h
else:
with block.local_scope():
H_src, H_dst = H
block.srcdata['h'] = H_src
block.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_new'))
h_neigh = block.dstdata['h_new']
h = self.W(th.cat([H_dst, h_neigh], 1))
if self.activation is not None:
h = self.activation(h)
return h
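# Note (added for clarity): in training mode SAGEConvWithCV implements the
# control-variate (history-based) estimator: the pre-aggregated historical
# activations (agg_hbar) are corrected by the mean of the deltas between the
# current and historical features of the sampled neighbours, which reduces the
# variance introduced by neighbour sampling.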
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(SAGEConvWithCV(in_feats, n_hidden, activation))
for i in range(1, n_layers - 1):
self.layers.append(SAGEConvWithCV(n_hidden, n_hidden, activation))
self.layers.append(SAGEConvWithCV(n_hidden, n_classes, None))
def forward(self, blocks):
h = blocks[0].srcdata['features']
updates = []
for layer, block in zip(self.layers, blocks):
# We need to first copy the representation of nodes on the RHS from the
# appropriate nodes on the LHS.
# Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst
# would be (num_nodes_RHS, D)
h_dst = h[:block.number_of_dst_nodes()]
hbar_src = block.srcdata['hist']
agg_hbar_dst = block.dstdata['agg_hist']
# Then we compute the updated representation on the RHS.
# The shape of h now becomes (num_nodes_RHS, D)
h = layer(block, (h, h_dst), (hbar_src, agg_hbar_dst))
block.dstdata['h_new'] = h
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input features of the entire node set.
The inference code is written so that it can handle any number of nodes and
layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
# on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
ys = []
for l, layer in enumerate(self.layers):
y = th.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
block = block.to(device)
induced_nodes = block.srcdata[dgl.NID]
h = x[induced_nodes].to(device)
h_dst = h[:block.number_of_dst_nodes()]
h = layer(block, (h, h_dst))
y[start:end] = h.cpu()
ys.append(y)
x = y
return y, ys
class NeighborSampler(object):
def __init__(self, g, fanouts):
self.g = g
self.fanouts = fanouts
def sample_blocks(self, seeds):
seeds = th.LongTensor(seeds)
blocks = []
hist_blocks = []
for fanout in self.fanouts:
# For each seed node, sample ``fanout`` neighbors.
frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout)
hist_frontier = dgl.in_subgraph(self.g, seeds)
# Then we compact the frontier into a bipartite graph for message passing.
block = dgl.to_block(frontier, seeds)
hist_block = dgl.to_block(hist_frontier, seeds)
# Obtain the seed nodes for next layer.
seeds = block.srcdata[dgl.NID]
blocks.insert(0, block)
hist_blocks.insert(0, hist_block)
return blocks, hist_blocks
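# Minimal sketch of how the sampler is used (mirrors the DataLoader wiring below):
#   sampler = NeighborSampler(g, fanouts=[5, 10])
#   blocks, hist_blocks = sampler.sample_blocks([0, 1, 2])
# `blocks` come from fanout-limited sampling, `hist_blocks` from the full
# in-neighbourhood; both are ordered from the input layer to the output layer.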
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)
def evaluate(model, g, labels, val_mask, batch_size, device):
"""
Evaluate the model on the validation set specified by ``val_mask``.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
inputs = g.ndata['features']
pred, _ = model.inference(g, inputs, batch_size, device)
model.train()
return compute_acc(pred[val_mask], labels[val_mask])
def load_subtensor(g, labels, blocks, hist_blocks, dev_id, aggregation_on_device=False):
"""
Copies features and labels of a set of nodes onto the GPU.
"""
blocks[0].srcdata['features'] = g.ndata['features'][blocks[0].srcdata[dgl.NID]]
blocks[-1].dstdata['label'] = labels[blocks[-1].dstdata[dgl.NID]]
ret_blocks = []
ret_hist_blocks = []
for i, (block, hist_block) in enumerate(zip(blocks, hist_blocks)):
hist_col = 'features' if i == 0 else 'hist_%d' % i
block.srcdata['hist'] = g.ndata[hist_col][block.srcdata[dgl.NID]]
# Aggregate history
hist_block.srcdata['hist'] = g.ndata[hist_col][hist_block.srcdata[dgl.NID]]
if aggregation_on_device:
hist_block = hist_block.to(dev_id)
hist_block.update_all(fn.copy_u('hist', 'm'), fn.mean('m', 'agg_hist'))
block = block.to(dev_id)
if not aggregation_on_device:
hist_block = hist_block.to(dev_id)
block.dstdata['agg_hist'] = hist_block.dstdata['agg_hist']
ret_blocks.append(block)
ret_hist_blocks.append(hist_block)
return ret_blocks, ret_hist_blocks
def init_history(g, model, dev_id):
with th.no_grad():
history = model.inference(g, g.ndata['features'], 1000, dev_id)[1]
for layer in range(args.num_layers + 1):
if layer > 0:
hist_col = 'hist_%d' % layer
g.ndata['hist_%d' % layer] = history[layer - 1]
def update_history(g, blocks):
with th.no_grad():
for i, block in enumerate(blocks):
ids = block.dstdata[dgl.NID].cpu()
hist_col = 'hist_%d' % (i + 1)
h_new = block.dstdata['h_new'].cpu()
g.ndata[hist_col][ids] = h_new
def run(args, dev_id, data):
dropout = 0.2
th.cuda.set_device(dev_id)
# Unpack data
train_mask, val_mask, in_feats, labels, n_classes, g = data
train_nid = th.LongTensor(np.nonzero(train_mask)[0])
val_nid = th.LongTensor(np.nonzero(val_mask)[0])
train_mask = th.BoolTensor(train_mask)
val_mask = th.BoolTensor(val_mask)
# Create sampler
sampler = NeighborSampler(g, [int(_) for _ in args.fan_out.split(',')])
# Create PyTorch DataLoader for constructing blocks
dataloader = DataLoader(
dataset=train_nid.numpy(),
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False,
num_workers=args.num_workers_per_gpu)
# Define model
model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu)
# Move the model to GPU and define optimizer
model = model.to(dev_id)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(dev_id)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Compute history tensor and their aggregation before training on CPU
model.eval()
init_history(g, model, dev_id)
model.train()
# Training loop
avg = 0
iter_tput = []
for epoch in range(args.num_epochs):
tic = time.time()
model.train()
tic_step = time.time()
for step, (blocks, hist_blocks) in enumerate(dataloader):
# The nodes for input lie on the LHS side of the first block.
# The nodes for output lie on the RHS side of the last block.
input_nodes = blocks[0].srcdata[dgl.NID]
seeds = blocks[-1].dstdata[dgl.NID]
blocks, hist_blocks = load_subtensor(g, labels, blocks, hist_blocks, dev_id, True)
# forward
batch_pred = model(blocks)
# update history
update_history(g, blocks)
# compute loss
batch_labels = blocks[-1].dstdata['label']
loss = loss_fcn(batch_pred, batch_labels)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % args.log_every == 0:
acc = compute_acc(batch_pred, batch_labels)
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f}'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:])))
tic_step = time.time()
toc = time.time()
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0 and epoch != 0:
model.eval()
eval_acc = evaluate(model, g, labels, val_nid, args.val_batch_size, dev_id)
print('Eval Acc {:.4f}'.format(eval_acc))
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--gpu', type=str, default='0')
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--fan-out', type=str, default='1,1')
argparser.add_argument('--batch-size', type=int, default=1000)
argparser.add_argument('--val-batch-size', type=int, default=1000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=5)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--num-workers-per-gpu', type=int, default=0)
args = argparser.parse_args()
# load reddit data
data = RedditDataset(self_loop=True)
n_classes = data.num_classes
g = data[0]
features = g.ndata['feat']
in_feats = features.shape[1]
labels = g.ndata['label']
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
g.ndata['features'] = features
g.create_format_()
# Pack data
data = train_mask, val_mask, in_feats, labels, n_classes, g
run(args, int(args.gpu), data)
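# Hedged invocation sketch (the script name is an assumption):
#   python train_cv.py --gpu 0 --num-epochs 20 --num-layers 2 --fan-out 1,1 \
#       --batch-size 1000 --eval-every 5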
|
the-stack_0_26949
|
import logging
from dataclasses import asdict
from typing import Optional, Tuple
from dacite import from_dict
from common.Db import get_db
from common.SlackBot import BotsConversation, Bot, TwoWayBot
logger = logging.getLogger(__name__)
def register_bot(authentication_code: str, bot: Bot):
"""
Register bot to the system.
"""
get_db().hmset(f'registration.{authentication_code}', asdict(bot))
def get_bot(authentication_code: str) -> Tuple[Bot, Optional[TwoWayBot]]:
"""
Retrieves bot by auth code.
"""
data = get_db().hgetall(f'registration.{authentication_code}')
# find out whether this bot has URL, if so it is TwoWayBot
if data and data.get('bot_url'):
two_way = from_dict(data_class=TwoWayBot, data=data)
return two_way, two_way
return from_dict(data_class=Bot, data=data), None
def register_conversation(authentication_code: str, bot_id: str, roman_token: str):
"""
Register new conversation for the authentication_code.
"""
bot, _ = get_bot(authentication_code)
payload = BotsConversation(bot_api_key=bot.bot_api_key, roman_token=roman_token)
get_db().hmset(f'conversation.{bot_id}', asdict(payload))
def delete_conversation(bot_id: str):
"""
Deletes conversation.
"""
count = get_db().delete(f'conversation.{bot_id}')
logger.info(f'{count} conversation(s) deleted for bot {bot_id}.')
def get_conversation(bot_id: str) -> BotsConversation:
"""
Retrieves conversation by bot id.
"""
data = get_db().hgetall(f'conversation.{bot_id}')
logger.debug(f'Retrieved payload - {data}')
return BotsConversation(bot_api_key=data['bot_api_key'], roman_token=data['roman_token'])
def get_conversation_checked(bot_id: str, used_api_key: str) -> Optional[BotsConversation]:
"""
Retrieves conversation and checks token. Returns None if api keys don't match.
"""
try:
conversation = get_conversation(bot_id)
if conversation.bot_api_key == used_api_key:
return conversation
else:
logger.warning(f'Bot {bot_id} used API key {used_api_key}, but different was expected.')
except Exception:
logger.warning(f'It was not possible to obtain conversation! Bot {bot_id} and api key {used_api_key}.')
return None
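# Hedged end-to-end sketch (identifiers are made up; the full Bot/TwoWayBot field
# lists live in common.SlackBot, only bot_api_key and bot_url are referenced here,
# and any other required Bot fields are omitted):
#   register_bot("auth-123", Bot(bot_api_key="key-abc"))
#   register_conversation("auth-123", bot_id="B42", roman_token="tok-xyz")
#   conv = get_conversation_checked("B42", used_api_key="key-abc")  # BotsConversation
#   delete_conversation("B42")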
|
the-stack_0_26950
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Paper: https://arxiv.org/pdf/1508.04025
class Encoder(nn.Module):
"""
Encoder: a simple bidirectional RNN that takes both left and right context into account.
"""
def __init__(self, vocab_size, embed_size, enc_hidden_size, dec_hidden_size, dropout=0.2):
super(Encoder, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_size)
self.rnn = nn.GRU(embed_size, enc_hidden_size, batch_first=True, bidirectional=True)
self.dropout = nn.Dropout(dropout)
self.fc = nn.Linear(enc_hidden_size * 2, dec_hidden_size)
def forward(self, x, lengths):
x = self.dropout(self.embed(x))
# Sentence lengths differ, so use pack_padded_sequence to tell the RNN not to process the padding beyond each length
x = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
out, hid = self.rnn(x)
# -> batch_size, x_len,2*hidden_size
out, _ = pad_packed_sequence(out, batch_first=True)
# hid dim: 2, batch_size, hidden_size -> batch_size,hidden_size*2
hid = torch.cat((hid[-2], hid[-1]), dim=1)
# Project hid to dec_hidden_size so it can be fed to the decoder as the encoder hidden state
hid = torch.tanh(self.fc(hid)).unsqueeze(0)
return out, hid
class Attention(nn.Module):
"""
att = softmax(<encoded_ys,encoded_xs>)
att_xs = att * encoded_xs
output = [att_xs,ys]
"""
def __init__(self, enc_hidden_size, dec_hidden_size):
super(Attention, self).__init__()
self.enc_hidden_size = enc_hidden_size
self.dec_hidden_size = dec_hidden_size
self.fc_in = nn.Linear(enc_hidden_size * 2, dec_hidden_size, bias=False)
self.fc_out = nn.Linear(enc_hidden_size * 2 + dec_hidden_size, dec_hidden_size)
def forward(self, xs, ys, mask):
"""
1. Project dimensions: xs' = fc(xs)
2. Compute attention weights: att = softmax(<ys, xs'>)
3. Apply attention: att_xs = att * xs
4. Concatenate: cat(att_xs, ys)
:param xs: encoded inputs, dim: batch_size, x_len, x_size (encode_hidden_size)
:param ys: encoded labels, dim: batch_size, 1 or y_len, y_size (decode_hidden_size)
:param mask: True for PAD characters, False otherwise
:return: dim: batch_size,y_len,y_size
"""
batch_size = xs.size(0)
input_len = xs.size(1)
output_len = ys.size(1)
# 1. Project dimensions so the input matches the output size, otherwise the inner product is impossible. batch_size, x_len, x_size -> batch_size, x_len, y_size
xs_ = self.fc_in(xs.view(batch_size * input_len, -1)).view(batch_size, input_len, -1)
# 2. Compute attention weights: the inner product of x and y gives their relevance,
# att = <x,y> = Y*X^T in batch -> batch_size, y_len, x_len (one weight per source character)
att = torch.bmm(ys, xs_.transpose(1, 2))
# Weights on PAD characters should go to 0
att.masked_fill_(mask, -1e6)
att = F.softmax(att, dim=2)
# 3. Apply attention: -> batch_size, y_len, y_size
att_xs = torch.bmm(att, xs)
# 4. Concatenate: cat(att_xs, ys) -> batch_size, y_len, x_size+y_size
output = torch.cat((att_xs, ys), dim=2)
# The next two steps project back to the output dimension -> batch_size, y_len, y_size
output = output.view(batch_size * output_len, -1)
output = torch.tanh(self.fc_out(output)).view(batch_size, output_len, -1)
return output, att
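# Shape walk-through of Attention.forward (illustrative sizes, restating the code):
#   xs : (B, x_len, 2*enc_hidden)         encoder outputs
#   ys : (B, y_len, dec_hidden)           decoder states
#   att = softmax(ys @ fc_in(xs)^T)    -> (B, y_len, x_len), PAD positions ~ 0
#   out = tanh(fc_out([att @ xs ; ys])) -> (B, y_len, dec_hidden)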
class Decoder(nn.Module):
def __init__(self, vocab_size, embed_size, enc_hidden_size, dec_hidden_size, dropout=0.2):
super(Decoder, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_size)
self.attention = Attention(enc_hidden_size, dec_hidden_size)
self.rnn = nn.GRU(embed_size, dec_hidden_size, batch_first=True)
self.dropout = nn.Dropout(dropout)
self.out_fc = nn.Linear(dec_hidden_size, vocab_size)
def create_mask(self, ys_len, xs_len):
"""
Positions that involve a PAD character (in either sequence) are marked True so they can be masked out; fully valid positions are False.
:param ys_len:
:param xs_len:
:return: (batch_size, ys_len, xs_len)
"""
y_max = ys_len.max()
x_max = xs_len.max()
# valid characters are True
y_mask = ys_len[:, None] > torch.arange(y_max)[None, :]
x_mask = xs_len[:, None] > torch.arange(x_max)[None, :]
# True where both the y character and the x character exist, then invert
return ~(y_mask[:, :, None] * x_mask[:, None, :])
def forward(self, xs, xs_len, ys, ys_len, hid):
ys = self.dropout(self.embed(ys))
ys = pack_padded_sequence(ys, ys_len, batch_first=True, enforce_sorted=False)
out, hid = self.rnn(ys, hid)
out, _ = pad_packed_sequence(out, batch_first=True)
mask = self.create_mask(ys_len, xs_len)
out, att = self.attention(xs, out, mask)
out = F.log_softmax(self.out_fc(out), -1)
return out, hid, att
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, xs, xs_len, ys, ys_len):
enc_xs, hid = self.encoder(xs, xs_len)
output, hid, att = self.decoder(enc_xs, xs_len, ys, ys_len, hid)
return output, att
def translate(self, xs, xs_len, ys, max_length=20):
enc_xs, hid = self.encoder(xs, xs_len)
preds = []
batch_size = xs.size(0)
atts = []
for i in range(max_length):
ys_len = torch.ones(batch_size)
output, hid, att = self.decoder(enc_xs, xs_len, ys, ys_len, hid)
ys = output.max(2)[1].view(batch_size, 1)
preds.append(ys)
atts.append(att)
return torch.cat(preds, 1), torch.cat(atts, 1)
class LanguageCriterion(nn.Module):
def __init__(self):
super(LanguageCriterion, self).__init__()
def forward(self, pred, target, mask):
pred = pred.contiguous().view(-1, pred.size(2))
target = target.contiguous().view(-1, 1)
mask = mask.contiguous().view(-1, 1)
# cross entropy
mask = mask.float()
output = -pred.gather(1, target) * mask
output = torch.sum(output) / torch.sum(mask)
return output
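# The criterion above is a masked NLL: loss = -(sum_t mask_t * log p(y_t)) / sum_t mask_t,
# where `pred` already holds log-probabilities (log_softmax in Decoder.forward) and
# `mask` zeroes out positions beyond each sentence's length.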
from seq2seq.data_loader import train_dataloader, en_total_words, cn_total_words, test_dataloader, decode_sents, BOS, \
cn_dict
hidden_size = 100
embedding_size = 100
encoder = Encoder(en_total_words, embedding_size, hidden_size, hidden_size)
decoder = Decoder(cn_total_words, embedding_size, hidden_size, hidden_size)
model = Seq2Seq(encoder, decoder)
loss_fn = LanguageCriterion()
optimizer = torch.optim.Adam(model.parameters())
def eval():
model.eval()
total_num_words = total_loss = 0.
with torch.no_grad():
for i, (en, en_lens, cn, cn_lens) in enumerate(test_dataloader):
cn_input = cn[:, :-1] # drop the last character
cn_output = cn[:, 1:] # targets start at the first real character, not <BOS>; <BOS> -> first char; last char -> <EOS>
cn_lens = cn_lens - 1
cn_lens[cn_lens <= 0] = 1 # keep at least [<BOS>]
pred, _ = model(en, en_lens, cn_input, cn_lens)
# only compute the loss within each sentence's length
output_mask = torch.arange(cn_lens.max().item())[None, :] < cn_lens[:, None]
loss = loss_fn(pred, cn_output, output_mask)
num_words = torch.sum(cn_lens).item()
total_loss += loss.item() * num_words
total_num_words += num_words
print("Evaluation loss", total_loss / total_num_words)
def train(num_epoch=10):
print(torch.__config__.parallel_info())
for epoch in range(num_epoch):
model.train()
total_num_words = 0
total_loss = 0
for i, (en, en_lens, cn, cn_lens) in enumerate(train_dataloader):
cn_input = cn[:, :-1] # drop the last character
cn_output = cn[:, 1:] # targets start at the first real character, not <BOS>; <BOS> -> first char; last char -> <EOS>
cn_lens = cn_lens - 1
cn_lens[cn_lens <= 0] = 1 # patch up sentences of length 1
# cn_input: fed in full, acting as a teacher to speed up training
# output: [batch, sentence_len, prob_over_cn_words]
pred, _ = model(en, en_lens, cn_input, cn_lens)
# only compute the loss within each sentence's length
output_mask = torch.arange(cn_lens.max().item())[None, :] < cn_lens[:, None]
loss = loss_fn(pred, cn_output, output_mask)
num_words = torch.sum(cn_lens).item()
total_loss += loss.item() * num_words
total_num_words += num_words
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.)
optimizer.step()
if i % 100 == 0:
print("Epoch", epoch, "iteration", i, " training loss", loss.item())
translate()
eval()
torch.save(model.state_dict(), f"rnn_attention_model_epoch_{epoch}.model")
print("Epoch", epoch, "Training loss", total_loss / total_num_words)
def translate():
# model.eval()
total_num_words = total_loss = 0.
with torch.no_grad():
for i, (en, en_lens, cn, cn_lens) in enumerate(test_dataloader):
# cn_input = cn[:, :-1] # drop the last character
cn_output = cn[:, 1:] # targets start at the first real character, not <BOS>; <BOS> -> first char; last char -> <EOS>
# cn_lens = cn_lens - 1
# cn_lens[cn_lens <= 0] = 1 # keep at least [<BOS>]
bos = torch.zeros(en.size(0), 1)
bos.fill_(cn_dict[BOS])
# cn_input: fed in full, acting as a teacher to speed up training
# output: [batch, sentence_len, prob_over_cn_words]
pred, atts = model.translate(en, en_lens, cn_output)
print(decode_sents(en, False))
print(decode_sents(cn))
print(decode_sents(pred))
return
if __name__ == '__main__':
# translate()
train()
|
the-stack_0_26951
|
"""Copy and paste using the internet"""
__version__ = '0.1'
import argparse
import pyperclip as clipboard
import requests
from bs4 import BeautifulSoup
from colorama import Fore
def create_clip(l, m):
"""Create clip """
url = 'https://cl1p.net/' + l
response = requests.get(url)
page = response.content
soup = BeautifulSoup(page, 'html.parser')
clipContent = soup.find("div", {"id": "cl1pContent"})
if clipContent is not None:
action = soup.find("input", {"name": "action"})['value'].strip()
pageHash = soup.find("input", {"name": "pageHash"})['value'].strip()
seqHash = soup.find("input", {"name": "seqHash"})['value'].strip()
data = {'content': m,
'action': action,
'pageHash': pageHash,
'seqHash': seqHash}
r = requests.post(url=url, data=data)
pastebin_url = r.text
print("")
print(Fore.BLUE + "The cl1p URL is : %s" % url)
print("")
else:
print("")
print(Fore.RED + "" + url + " is already in use please use different url . Use \'-l\' flag to specify url ")
print("")
def get_clip(l, c):
"""get clip from cl1p.net"""
url = 'https://cl1p.net/' + l
response = requests.get(url)
page = response.content
soup = BeautifulSoup(page, 'html.parser')
contentRead = soup.find("div", {"class": "contentRead"})
if contentRead is not None:
textContetnt = soup.find("textarea", {"name": "content"})
if c:
clipboard.copy(textContetnt.text)
print("")
print(Fore.BLUE + "Content copied to clipboard")
print("")
else:
print(" ")
print(Fore.BLUE + "*************************************")
print(Fore.BLUE + "\t\tClip data")
print(Fore.BLUE + "*************************************")
print(Fore.RESET + "")
print(textContetnt.text)
print(" ")
else:
print(" ")
print(Fore.RED + "No clip is present at " + url + ".")
print(" ")
def run():
parser = argparse.ArgumentParser(
description="cl1p.net lets you move information between computers using your internet")
parser.add_argument("-l", "--l",
action='store',
help='Set URL',
type=str,
required=True)
parser.add_argument("-m", "--m",
action='store',
help='Set clip message',
type=str,
required=False)
parser.add_argument("-c", "--c",
action='store_true',
help='Copy clip content directly to clipboard',
required=False)
parser.add_argument("-d", "--d",
action='store_true',
help='Create clip directly from clipboard',
required=False)
args = parser.parse_args()
if args.m is not None:
create_clip(args.l, args.m)
elif args.d:
text = clipboard.paste()
create_clip(args.l, text)
else:
get_clip(args.l, args.c)
if __name__ == '__main__':
run()
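# Hedged CLI sketch (the entry-point/script name is an assumption):
#   python cl1p.py -l my-clip -m "text to share"   # create https://cl1p.net/my-clip
#   python cl1p.py -l my-clip                      # print the clip
#   python cl1p.py -l my-clip -c                   # copy it straight to the clipboard
#   python cl1p.py -l my-clip -d                   # create a clip from the clipboard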
|
the-stack_0_26953
|
# -*- coding: utf8 -*-
# Copyright 2012 Harald Schilly <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from panobbgo.core import Heuristic
class Extremal(Heuristic):
"""
This heuristic is specifically seeking for points at the
border of the box and around 0.
The ``prob`` parameter takes a list or tuple of values from 0 to 1,
which indicate the relative probability of sampling from the
minimum, zero, center and maximum. Default = (1, .2, .2, 1).
"""
def __init__(self, strategy, diameter=1. / 10, prob=None):
Heuristic.__init__(self, strategy, name="Extremal")
import numpy as np
if prob is None:
prob = (1, .2, .2, 1)
prob = np.array(prob) / float(np.sum(prob))
self.probabilities = prob.cumsum()
self.diameter = diameter # inside the box or around zero
self.vals = None
def __start__(self):
import numpy as np
problem = self.problem
low = problem.box[:, 0]
high = problem.box[:, 1]
zero = np.zeros(problem.dim)
center = low + (high - low) / 2.
self.vals = np.row_stack((low, zero, center, high))
def on_start(self):
import numpy as np
while True:
ret = np.empty(self.problem.dim)
for i in range(self.problem.dim):
r = np.random.rand()
for idx, val in enumerate(self.probabilities):
if val > r:
radius = self.problem.ranges[i] * self.diameter
# jitter = radius * (np.random.rand() - .5)
jitter = np.random.normal(0, radius)
if idx == 0:
# minimum border
ret[i] = self.vals[idx, i] + abs(jitter)
elif idx == len(self.probabilities) - 1:
# maximum border
ret[i] = self.vals[idx, i] - abs(jitter)
else:
# around center or zero
ret[i] = self.vals[idx, i] + jitter
break # since we found the idx, break!
self.emit(ret)
# stop early, if run by unittests
if self.strategy.config.testing_mode:
return
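# Worked example of the sampling above with the default prob=(1, .2, .2, 1):
# normalised to ~(.417, .083, .083, .417), cumsum (.417, .5, .583, 1.0), so per
# coordinate a uniform draw r picks the minimum border for r < .417, zero for
# r < .5, the center for r < .583 and the maximum border otherwise, each offset
# by an N(0, radius) jitter (folded towards the inside at the two borders).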
|
the-stack_0_26955
|
import base64
import logging
import shutil
from enum import Enum
from typing import NamedTuple, Optional
from typing_extensions import Literal
from rotkehlchen.data_handler import DataHandler
from rotkehlchen.errors import (
PremiumAuthenticationError,
RemoteError,
RotkehlchenPermissionError,
UnableToDecryptRemoteData,
)
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.premium.premium import Premium, PremiumCredentials, premium_create_and_verify
from rotkehlchen.utils.misc import timestamp_to_date, ts_now
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class CanSync(Enum):
YES = 0
NO = 1
ASK_USER = 2
class SyncCheckResult(NamedTuple):
# The result of the sync check
can_sync: CanSync
# If result is ASK_USER, what should the message be?
message: str
class PremiumSyncManager():
def __init__(self, data: DataHandler, password: str) -> None:
self.last_data_upload_ts = 0
self.data = data
self.password = password
self.premium: Optional[Premium] = None
def _can_sync_data_from_server(self, new_account: bool) -> SyncCheckResult:
"""
Checks if the remote data can be pulled from the server.
Returns a SyncCheckResult denoting whether we can pull for sure,
whether we can't pull or whether the user should be asked. If the user
should be asked a message is also returned
"""
log.debug('can sync data from server -- start')
if self.premium is None:
return SyncCheckResult(can_sync=CanSync.NO, message='')
b64_encoded_data, our_hash = self.data.compress_and_encrypt_db(self.password)
try:
metadata = self.premium.query_last_data_metadata()
except RemoteError as e:
log.debug('can sync data from server failed', error=str(e))
return SyncCheckResult(can_sync=CanSync.NO, message='')
if new_account:
return SyncCheckResult(can_sync=CanSync.YES, message='')
if not self.data.db.get_premium_sync():
# If it's not a new account and the DB setting for premium syncing is off, stop
return SyncCheckResult(can_sync=CanSync.NO, message='')
log.debug(
'CAN_PULL',
ours=our_hash,
theirs=metadata.data_hash,
)
if our_hash == metadata.data_hash:
log.debug('sync from server stopped -- same hash')
# same hash -- no need to get anything
return SyncCheckResult(can_sync=CanSync.NO, message='')
our_last_write_ts = self.data.db.get_last_write_ts()
data_bytes_size = len(base64.b64decode(b64_encoded_data))
if our_last_write_ts >= metadata.last_modify_ts:
message_prefix = (
'Detected remote database BUT with older last modification timestamp '
'than the local one. '
)
else:
if data_bytes_size > metadata.data_size:
message_prefix = (
'Detected newer remote database BUT with smaller size than the local one. '
)
else:
message_prefix = 'Detected newer remote database. '
message = (
f'{message_prefix}'
f'Local size: {data_bytes_size} Remote size: {metadata.data_size} '
f'Local last modified time: {timestamp_to_date(our_last_write_ts)} '
f'Remote last modified time: {timestamp_to_date(metadata.last_modify_ts)} '
f'Would you like to replace the local DB with the remote one?'
)
return SyncCheckResult(
can_sync=CanSync.ASK_USER,
message=message,
)
def _sync_data_from_server_and_replace_local(self) -> bool:
"""
Performs syncing of data from server and replaces local db
Returns true for success and False for error/failure
May raise:
- PremiumAuthenticationError due to an UnableToDecryptRemoteData
coming from decompress_and_decrypt_db. This happens when the given password
does not match the one on the saved DB.
"""
assert self.premium, 'This function has to be called with a not None premium'
try:
result = self.premium.pull_data()
except RemoteError as e:
log.debug('sync from server -- pulling failed.', error=str(e))
return False
try:
self.data.decompress_and_decrypt_db(self.password, result['data'])
except UnableToDecryptRemoteData:
raise PremiumAuthenticationError(
'The given password can not unlock the database that was retrieved from '
'the server. Make sure to use the same password as when the account was created.',
)
return True
def maybe_upload_data_to_server(self) -> None:
# if user has no premium do nothing
if self.premium is None:
return
# upload only once per hour
diff = ts_now() - self.last_data_upload_ts
if diff < 3600:
return
b64_encoded_data, our_hash = self.data.compress_and_encrypt_db(self.password)
try:
metadata = self.premium.query_last_data_metadata()
except RemoteError as e:
log.debug(
'upload to server stopped -- query last metadata failed',
error=str(e),
)
return
log.debug(
'CAN_PUSH',
ours=our_hash,
theirs=metadata.data_hash,
)
if our_hash == metadata.data_hash:
log.debug('upload to server stopped -- same hash')
# same hash -- no need to upload anything
return
our_last_write_ts = self.data.db.get_last_write_ts()
if our_last_write_ts <= metadata.last_modify_ts:
# Server's DB was modified after our local DB
log.debug('upload to server stopped -- remote db more recent than local')
return
data_bytes_size = len(base64.b64decode(b64_encoded_data))
if data_bytes_size < metadata.data_size:
# Let's be conservative.
# TODO: Here perhaps prompt user in the future
log.debug('upload to server stopped -- remote db bigger than local')
return
try:
self.premium.upload_data(
data_blob=b64_encoded_data,
our_hash=our_hash,
last_modify_ts=our_last_write_ts,
compression_type='zlib',
)
except RemoteError as e:
log.debug('upload to server -- upload error', error=str(e))
return
# update the last data upload value
self.last_data_upload_ts = ts_now()
self.data.db.update_last_data_upload_ts(self.last_data_upload_ts)
log.debug('upload to server -- success')
def try_premium_at_start(
self,
given_premium_credentials: Optional[PremiumCredentials],
username: str,
create_new: bool,
sync_approval: Literal['yes', 'no', 'unknown'],
) -> Optional[Premium]:
"""
Check if new user provided api pair or we already got one in the DB
Returns the created premium if user's premium credentials were fine.
If not it will raise PremiumAuthenticationError.
If no credentials were given it returns None
"""
if given_premium_credentials is not None:
assert create_new, 'We should never get here for an already existing account'
try:
self.premium = premium_create_and_verify(given_premium_credentials)
except PremiumAuthenticationError as e:
log.error('Given API key is invalid')
# At this point we are at a new user trying to create an account with
# premium API keys and we failed. But a directory was created. Remove it.
# But create a backup of it in case something went really wrong
# and the directory contained data we did not want to lose
shutil.move(
self.data.user_data_dir, # type: ignore
self.data.data_directory / f'auto_backup_{username}_{ts_now()}',
)
raise PremiumAuthenticationError(
'Could not verify keys for the new account. '
'{}'.format(str(e)),
)
# else, if we got premium data in the DB initialize it and try to sync with the server
db_credentials = self.data.db.get_rotkehlchen_premium()
if db_credentials:
assert not create_new, 'We should never get here for a new account'
try:
self.premium = premium_create_and_verify(db_credentials)
except PremiumAuthenticationError as e:
message = (
f'Could not authenticate with the rotkehlchen server with '
f'the API keys found in the Database. Error: {str(e)}'
)
log.error(message)
raise PremiumAuthenticationError(message)
if self.premium is None:
return None
result = self._can_sync_data_from_server(new_account=create_new)
if result.can_sync == CanSync.ASK_USER:
if sync_approval == 'unknown':
log.info('DB data at server newer than local')
raise RotkehlchenPermissionError(result.message)
elif sync_approval == 'yes':
log.info('User approved data sync from server')
if self._sync_data_from_server_and_replace_local():
if create_new:
# if we successfully synced data from the server and this is
# a new account, make sure the api keys are properly stored
# in the DB
self.data.db.set_rotkehlchen_premium(self.premium.credentials)
else:
log.debug('Could sync data from server but user refused')
elif result.can_sync == CanSync.YES:
log.info('User approved data sync from server')
if self._sync_data_from_server_and_replace_local():
if create_new:
# if we successfully synced data from the server and this is
# a new account, make sure the api keys are properly stored
# in the DB
self.data.db.set_rotkehlchen_premium(self.premium.credentials)
# else result.can_sync was no, so we do nothing
# Success, return premium
return self.premium
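# Informal summary of _can_sync_data_from_server (restating the branches above):
#   no premium / metadata query fails ........ CanSync.NO
#   new account .............................. CanSync.YES
#   premium sync disabled in the DB .......... CanSync.NO
#   local and remote DB hashes equal ......... CanSync.NO
#   otherwise ................................ CanSync.ASK_USER, with a message
#   comparing local/remote size and last-modified timestamps.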
|
the-stack_0_26957
|
#!/usr/bin/env python3
# coding: utf-8
import sfml as sf
RESOLUTION = 1024, 512
TILE_SIZE = 16
TEXTURE_UNKNOWN = sf.Texture.from_file('src/assets/textures/blocks/unknown.png')
ENTITY_MOVE_DURATION = .2
PLAYER_ANIM_INTERVAL = ENTITY_MOVE_DURATION / 3
BIOME_SIZE = 16, 16
|
the-stack_0_26960
|
# field lookup values
OBS_PII_NAME_FIRST = 1585596
OBS_PII_NAME_MIDDLE = 1585597
OBS_PII_NAME_LAST = 1585598
OBS_PII_EMAIL_ADDRESS = 1585260
OBS_PII_PHONE = 1585252
OBS_PII_STREET_ADDRESS_ONE = 1585246
OBS_PII_STREET_ADDRESS_TWO = 1585247
OBS_PII_STREET_ADDRESS_CITY = 1585248
OBS_PII_STREET_ADDRESS_STATE = 1585249
OBS_PII_STREET_ADDRESS_ZIP = 1585250
OBS_PII_CONSENT_PRIMARY_PHONE = None
OBS_PII_BIRTH_DATETIME = 1585259
OBS_PII_SEX = 1585845
SEX_CONCEPT_IDS = {1585846: 'male', 1585847: 'female', 1585848: 'intersex'}
# DRC match responses
MATCH = "match"
MISMATCH = "no_match"
MISSING = "missing"
# Date format strings
DATE = '%Y-%m-%d'
DRC_DATE_FORMAT = '%Y%m%d'
DRC_DATE_REGEX = r'\d{8}'
# Table names
OBSERVATION_TABLE = 'observation'
PERSON_TABLE = 'person'
ID_MATCH_TABLE = 'id_match_table'
PII_EMAIL_TABLE = '_pii_email'
PII_PHONE_TABLE = '_pii_phone_number'
PII_ADDRESS_TABLE = '_pii_address'
PII_NAME_TABLE = '_pii_name'
EHR_PERSON_TABLE_SUFFIX = '_person'
VALIDATION_TABLE_SUFFIX = '_identity_match'
# Field names
PERSON_ID_FIELD = 'person_id'
FIRST_NAME_FIELD = 'first_name'
MIDDLE_NAME_FIELD = 'middle_name'
LAST_NAME_FIELD = 'last_name'
EMAIL_FIELD = 'email'
PHONE_NUMBER_FIELD = 'phone_number'
SEX_FIELD = 'sex'
ZIP_CODE_FIELD = 'zip'
STATE_FIELD = 'state'
CITY_FIELD = 'city'
ADDRESS_ONE_FIELD = 'address_1'
ADDRESS_TWO_FIELD = 'address_2'
BIRTH_DATE_FIELD = 'birth_date'
GENDER_FIELD = 'gender_concept_id'
BIRTH_DATETIME_FIELD = 'birth_datetime'
VALIDATION_FIELDS = [
FIRST_NAME_FIELD, MIDDLE_NAME_FIELD, LAST_NAME_FIELD,
EMAIL_FIELD, PHONE_NUMBER_FIELD, ZIP_CODE_FIELD, STATE_FIELD,
CITY_FIELD, ADDRESS_ONE_FIELD, ADDRESS_TWO_FIELD,
BIRTH_DATE_FIELD, SEX_FIELD
]
# Report names and directories
REPORT_TITLE = 'id-validation.csv'
REPORT_DIRECTORY = 'drc-validations-{date}'
REPORT_DIRECTORY_REGEX = r'drc-validations-\d{8}'
# Validation dataset name
DESTINATION_DATASET_DESCRIPTION = '{version} {rdr_dataset} + {ehr_dataset}'
|
the-stack_0_26961
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FiniteDiscrete distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'FiniteDiscrete',
]
class FiniteDiscrete(distribution.Distribution):
"""The finite discrete distribution.
The FiniteDiscrete distribution is parameterized by either probabilities or
log-probabilities of a set of `K` possible outcomes, which is defined by
a strictly ascending list of `K` values.
Note: log_prob, prob, cdf, mode, and entropy are differentiable with respect
to `logits` or `probs` but not with respect to `outcomes`.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(x; pi, qi) = prod_j pi_j**[x == qi_j]
```
#### Examples
```python
# Initialize a discrete distribution with 4 possible outcomes and the 2nd
# outcome being most likely.
dist = FiniteDiscrete([1., 2., 4., 8.], probs=[0.1, 0.4, 0.3, 0.2])
dist.prob(2.)
# ==> 0.4
# Using logits to initialize a discrete distribution with 4 possible outcomes
# and the 2nd outcome being most likely.
dist = FiniteDiscrete([1., 2., 4., 8.], logits=np.log([0.1, 0.4, 0.3, 0.2]))
dist.prob(2.)
# ==> 0.4
```
"""
def __init__(self,
outcomes,
logits=None,
probs=None,
rtol=None,
atol=None,
validate_args=False,
allow_nan_stats=True,
name='FiniteDiscrete'):
"""Construct a finite discrete contribution.
Args:
outcomes: A 1-D floating or integer `Tensor`, representing a list of
possible outcomes in strictly ascending order.
logits: A floating N-D `Tensor`, `N >= 1`, representing the log
probabilities of a set of FiniteDiscrete distributions. The first `N -
1` dimensions index into a batch of independent distributions and the
last dimension represents a vector of logits for each discrete value.
Only one of `logits` or `probs` should be passed in.
probs: A floating N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of FiniteDiscrete distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each discrete value. Only one
of `logits` or `probs` should be passed in.
rtol: `Tensor` with same `dtype` as `outcomes`. The relative tolerance for
floating-point comparison. Only effective when `outcomes` is a floating
`Tensor`. Default is `10 * eps`.
atol: `Tensor` with same `dtype` as `outcomes`. The absolute tolerance for
floating-point comparison. Only effective when `outcomes` is a floating
`Tensor`. Default is `10 * eps`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may render incorrect outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value '`NaN`' to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
outcomes_dtype = dtype_util.common_dtype(
[outcomes], dtype_hint=tf.float32)
self._outcomes = tensor_util.convert_nonref_to_tensor(
outcomes, dtype_hint=outcomes_dtype, name='outcomes')
if dtype_util.is_floating(self._outcomes.dtype):
eps = np.finfo(dtype_util.as_numpy_dtype(outcomes_dtype)).eps
self._rtol = 10 * eps if rtol is None else rtol
self._atol = 10 * eps if atol is None else atol
else:
self._rtol = None
self._atol = None
self._categorical = categorical.Categorical(
logits=logits,
probs=probs,
dtype=tf.int32,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
super(FiniteDiscrete, self).__init__(
dtype=self._outcomes.dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _params_event_ndims(cls):
# outcomes is currently not sliceable.
return dict(logits=1, probs=1)
@property
def outcomes(self):
return self._outcomes
@property
def logits(self):
"""Input argument `logits`."""
return self._categorical.logits
@property
def probs(self):
"""Input argument `probs`."""
return self._categorical.probs
def _batch_shape_tensor(self):
return self._categorical.batch_shape_tensor()
def _batch_shape(self):
return self._categorical.batch_shape
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _cdf(self, x):
x = tf.convert_to_tensor(x, name='x')
flat_x = tf.reshape(x, shape=[-1])
upper_bound = tf.searchsorted(self.outcomes, values=flat_x, side='right')
values_at_ub = tf.gather(
self.outcomes,
indices=tf.minimum(upper_bound,
prefer_static.shape(self.outcomes)[-1] - 1))
should_use_upper_bound = self._is_equal_or_close(flat_x, values_at_ub)
indices = tf.where(should_use_upper_bound, upper_bound, upper_bound - 1)
indices = tf.reshape(indices, shape=dist_util.prefer_static_shape(x))
indices_non_negative = tf.where(
tf.equal(indices, -1), tf.zeros([], indices.dtype), indices)
cdf = self._categorical.cdf(indices_non_negative)
return tf.where(tf.equal(indices, -1), tf.zeros([], cdf.dtype), cdf)
def _entropy(self):
return self._categorical.entropy()
def _is_equal_or_close(self, a, b):
if dtype_util.is_integer(self.outcomes.dtype):
return tf.equal(a, b)
return tf.abs(a - b) < self._atol + self._rtol * tf.abs(b)
def _log_prob(self, x):
x = tf.convert_to_tensor(x, name='x')
right_indices = tf.minimum(
tf.size(self.outcomes) - 1,
tf.reshape(
tf.searchsorted(
self.outcomes, values=tf.reshape(x, shape=[-1]), side='right'),
prefer_static.shape(x)))
use_right_indices = self._is_equal_or_close(
x, tf.gather(self.outcomes, indices=right_indices))
left_indices = tf.maximum(0, right_indices - 1)
use_left_indices = self._is_equal_or_close(
x, tf.gather(self.outcomes, indices=left_indices))
log_probs = self._categorical.log_prob(
tf.where(use_left_indices, left_indices, right_indices))
return tf.where(
tf.logical_not(use_left_indices | use_right_indices),
dtype_util.as_numpy_dtype(log_probs.dtype)(-np.inf),
log_probs)
def _mean(self, probs=None):
if probs is None:
probs = self._categorical.probs_parameter()
outcomes = self.outcomes
if dtype_util.is_integer(outcomes.dtype):
if self._validate_args:
outcomes = dist_util.embed_check_integer_casting_closed(
outcomes, target_dtype=probs.dtype)
outcomes = tf.cast(outcomes, dtype=probs.dtype)
return tf.tensordot(outcomes, probs, axes=[[0], [-1]])
def _mode(self):
return tf.gather(self.outcomes, indices=self._categorical.mode())
def _sample_n(self, n, seed=None, **distribution_kwargs):
return tf.gather(
self.outcomes,
indices=self._categorical.sample(
sample_shape=[n], seed=seed, **distribution_kwargs))
def _variance(self):
probs = self._categorical.probs_parameter()
outcomes = tf.broadcast_to(self.outcomes, shape=prefer_static.shape(probs))
if dtype_util.is_integer(outcomes.dtype):
if self._validate_args:
outcomes = dist_util.embed_check_integer_casting_closed(
outcomes, target_dtype=probs.dtype)
outcomes = tf.cast(outcomes, dtype=probs.dtype)
square_d = tf.math.squared_difference(
outcomes, self._mean(probs)[..., tf.newaxis])
return tf.reduce_sum(probs * square_d, axis=-1)
def logits_parameter(self, name=None):
"""Logits vec computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'logits_parameter'):
return self._categorical.logits_parameter()
def probs_parameter(self, name=None):
"""Probs vec computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'probs_parameter'):
return self._categorical.probs_parameter()
def _default_event_space_bijector(self):
return
def _parameter_control_dependencies(self, is_init):
assertions = []
# For `logits` and `probs`, we only want to have an assertion on what the
# user actually passed. For now, we access the underlying categorical's
# _logits and _probs directly. After the 2019-10-01 deprecation, it would
# also work to use .logits() and .probs().
logits = self._categorical._logits
probs = self._categorical._probs
outcomes = self._outcomes
validate_args = self._validate_args
# Build all shape and dtype checks during the `is_init` call.
if is_init:
def validate_equal_last_dim(tensor_a, tensor_b, message):
event_size_a = tf.compat.dimension_value(tensor_a.shape[-1])
event_size_b = tf.compat.dimension_value(tensor_b.shape[-1])
if event_size_a is not None and event_size_b is not None:
if event_size_a != event_size_b:
raise ValueError(message)
elif validate_args:
return assert_util.assert_equal(
tf.shape(tensor_a)[-1], tf.shape(tensor_b)[-1], message=message)
message = 'Size of outcomes must be greater than 0.'
if tensorshape_util.num_elements(outcomes.shape) is not None:
if tensorshape_util.num_elements(outcomes.shape) == 0:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.assert_greater(tf.size(outcomes), 0, message=message))
if logits is not None:
maybe_assert = validate_equal_last_dim(
outcomes,
# pylint: disable=protected-access
self._categorical._logits,
# pylint: enable=protected-access
message='Last dimension of outcomes and logits must be equal size.')
if maybe_assert:
assertions.append(maybe_assert)
if probs is not None:
maybe_assert = validate_equal_last_dim(
outcomes,
probs,
message='Last dimension of outcomes and probs must be equal size.')
if maybe_assert:
assertions.append(maybe_assert)
message = 'Rank of outcomes must be 1.'
ndims = tensorshape_util.rank(outcomes.shape)
if ndims is not None:
if ndims != 1:
raise ValueError(message)
elif validate_args:
assertions.append(assert_util.assert_rank(outcomes, 1, message=message))
if not validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(outcomes):
assertions.append(
assert_util.assert_equal(
tf.math.is_strictly_increasing(outcomes),
True,
message='outcomes is not strictly increasing.'))
return assertions
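# A further usage sketch (numbers follow the docstring example above):
#   dist = FiniteDiscrete([1., 2., 4., 8.], probs=[0.1, 0.4, 0.3, 0.2])
#   dist.cdf(4.)    # 0.1 + 0.4 + 0.3 = 0.8
#   dist.mean()     # 0.1*1 + 0.4*2 + 0.3*4 + 0.2*8 = 3.7
#   dist.mode()     # 2.0, the outcome with the largest probability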
|
the-stack_0_26964
|
from typing import Any, Dict, Iterable, Iterator, List, Optional, Union
import torch
import torch.distributed as dist
# Import the entire FSDP file to avoid circular imports
import torch.distributed.fsdp.fully_sharded_data_parallel as FSDP
from torch.distributed.fsdp.flatten_params_wrapper import FlatParameter
OPTIM_TARGET_RANK = 0 # rank on which to save full optimizer state
class ConsolidatedOptimState:
"""
This holds the consolidated optimizer state on the target rank. Positive-
dimension tensor state is communicated across ranks, while zero-dimension
tensor state and non-tensor state is taken directly from the target rank.
PyTorch version 1.12 moved to using zero-dimension tensors for scalar
values, but user implemented optimizers may still use float (i.e. a
non-tensor). Thus, we support both and handle them identically.
Attributes:
tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
tensor state name to the unsharded flattened tensor representing
the state.
zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
dimension tensor state name to its value.
non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
name to its value.
"""
tensor_state: Dict[str, torch.Tensor] = {}
zero_dim_tensor_state: Dict[str, torch.Tensor] = {}
non_tensor_state: Dict[str, Any] = {}
def _unflatten_optim_state(
fsdp_module,
flat_param: FlatParameter,
flat_param_state: Dict[str, Any],
) -> List[Dict[str, Any]]:
"""
Unflattens the optimizer state, consisting of the "state" part and the
"param_groups" part. Unflattening the "state" part involves consolidating
the state on the target rank and remapping from flattened to unflattened
parameter IDs, and the "param_groups" part only involves remapping from
flattened to unflattened parameter IDs.
Args:
fsdp_module (FullyShardedDataParallel): FSDP module that owns
``flat_param``, i.e. holds it in ``self.params``.
flat_param (FlatParameter): The flattened parameter.
flat_param_state (Dict[str, Any]): Entry for the flattened parameter
in the "state" part of the optimizer state dict.
Returns:
unflat_param_state (List[Dict[str, Any]]): A :class:`list` holding
the entries in the "state" part of the optimizer state dict
corresponding to the unflattened parameters comprising the
flattened parameter ``flat_param`` if on the target rank or an
empty :class:`list` otherwise. The final optimizer state dict will
need to map these entries using the proper unflattened parameter
IDs.
"""
assert sum(p is flat_param for p in fsdp_module.params) == 1, \
"`fsdp_module` must own `flat_param`"
consolidated_state = _communicate_optim_state(
fsdp_module, flat_param, flat_param_state,
)
to_save = fsdp_module.rank == OPTIM_TARGET_RANK
unflat_param_state = _unflatten_communicated_optim_state(
fsdp_module,
flat_param,
consolidated_state,
) if to_save else []
return unflat_param_state
def _communicate_optim_state(
fsdp_module,
flat_param: FlatParameter,
flat_param_state: Dict[str, Any],
) -> ConsolidatedOptimState:
"""
Communicates the optimizer state for a flattened parameter ``flat_param``
across ranks so that the target rank holds the entire non-sharded optimizer
state.
If ``N`` is the number of tensor optimizer states in the optimizer state
dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
otherwise (where the plus 1 comes from all-gathering the padding per rank).
Args:
flat_param (FlatParameter): The flattened parameter.
flat_param_state (Dict[str, Any]): The entry in the "state" part of
the optimizer state dict corresponding to the flattened parameter.
Returns:
state (ConsolidatedOptimState): Consolidated optimizer state for
``flat_param``; the state is not populated for non-target ranks.
"""
param_index = -1
for i, param in enumerate(fsdp_module.params):
if param is flat_param:
param_index = i
break
assert param_index >= 0, "`fsdp_module` must own `flat_param`"
state = ConsolidatedOptimState()
tensor_state, zero_dim_tensor_state, non_tensor_state = \
state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state
process_group = fsdp_module.process_group
tensor_buffer = None # initialize lazily in case it is not needed
to_save = fsdp_module.rank == OPTIM_TARGET_RANK
for state_name, value in flat_param_state.items():
# Positive-dimension tensor state: communicate across ranks
if torch.is_tensor(value) and value.dim() > 0:
# If the parameter is not sharded (e.g. world size of 1), then
# neither is the positive-dimension tensor state, so no need to
# communicate it -- we take the target rank's value
if not flat_param._is_sharded:
tensor_state[state_name] = value.cpu()
continue
if tensor_buffer is None:
# Assume that positive-dimension tensor optimizer state
# has the same shape as the sharded flattened parameter
buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
tensor_buffer = value.new_zeros(*buffer_size)
dist._all_gather_base(tensor_buffer, value, group=process_group)
if to_save:
assert hasattr(flat_param, "_orig_size"), \
"Sharded flattened parameter should have `_orig_size` set"
unpadded_numel = flat_param._orig_size.numel() # type: ignore[attr-defined]
tensor_state[state_name] = tensor_buffer[:unpadded_numel].cpu()
# Zero-dimension tensor state and non-tensor state: take this rank's
# value directly
elif to_save:
if _is_zero_dim_tensor(value):
zero_dim_tensor_state[state_name] = value.cpu()
else:
non_tensor_state[state_name] = value
return state
def _unflatten_communicated_optim_state(
fsdp_module,
flat_param: FlatParameter,
state: ConsolidatedOptimState,
) -> List[Dict[str, Any]]:
"""
Unflattens the communicated optimizer state (given by ``tensor_state``,
``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flattened
parameter ``flat_param``. This should only be called on the target rank.
Args:
fsdp_module (FullyShardedDataParallel): FSDP module that owns
``flat_param``, i.e. holds it in ``self.params``.
flat_param (FlatParameter): The flattened parameter.
state (ConsolidatedOptimState): Consolidated optimizer state.
Returns:
unflat_param_state (List[Dict[str, Any]]): A :class:`list` holding
the entries in the "state" part of the optimizer state dict
corresponding to the unflattened parameters comprising the
flattened parameter ``flat_param``. The final optimizer state dict
will need to map these entries using the proper unflattened
parameter IDs.
"""
assert sum(p is flat_param for p in fsdp_module.params) == 1, \
"`fsdp_module` must own `flat_param`"
unflat_param_state: List[Dict[str, Any]] = []
flat_param_views: Dict[str, Iterator] = {}
num_unflat_params = flat_param._num_unflattened_params
tensor_state, zero_dim_tensor_state, non_tensor_state = \
state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state
for _ in range(num_unflat_params):
unflat_state_param = {}
# Add positive-dimension tensor state: unflatten with views
for state_name, flat_tensor in tensor_state.items():
views_generated = state_name in flat_param_views
if not views_generated:
param_views = flat_param.get_param_views(flat_tensor)
flat_param_views[state_name] = param_views
else:
param_views = flat_param_views[state_name]
unflat_state_param[state_name] = next(param_views)
# Add zero-dimension tensor state: take the target rank's value
for state_name, zero_dim_tensor in zero_dim_tensor_state.items():
unflat_state_param[state_name] = zero_dim_tensor
# Add non-tensor state: take the target rank's value
for state_name, non_tensor in non_tensor_state.items():
unflat_state_param[state_name] = non_tensor
unflat_param_state.append(unflat_state_param)
return unflat_param_state
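# Informal data-flow sketch for the two helpers above (Adam-style state names are
# used only for illustration): for a FlatParameter covering unflattened params p0, p1,
#   {"exp_avg": sharded flat tensor, "step": t}
#     -> all-gather the shards, strip padding          (_communicate_optim_state)
#     -> split by p0/p1 shapes via get_param_views     (_unflatten_communicated_optim_state)
#     -> [{"exp_avg": T_p0, "step": t}, {"exp_avg": T_p1, "step": t}]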
def _flatten_optim_state(
unflat_osd_state: Dict[str, Dict[str, Any]],
unflat_param_names: List[str],
fsdp_module,
flat_param: FlatParameter,
) -> Dict[str, Any]:
"""
Flattens the optimizer state in ``full_optim_state_dict`` for a single
flattened parameter ``flat_param`` in ``fsdp_module`` corresponding to
the unflattened parameter names in ``unflat_param_names``.
Args:
unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
optimizer state dict corresponding to the unflattened parameters.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the flattened parameter
``flat_param``.
fsdp_module (FullyShardedDataParallel): FSDP module owning the
flattened parameter.
flat_param (FlatParameter): The flattened parameter.
Returns:
flat_state (Dict[str, Any]): A :class:`dict` mapping state names to
their values for a particular flattened parameter. The sharded
optimizer state dict's "state" part will map the flattened
parameter ID to this returned value.
"""
num_unflat_params = len(unflat_param_names)
assert num_unflat_params > 0, \
"Expects at least one unflattened parameter corresponding to the " \
"flattened parameter"
unflat_param_shapes = flat_param._param_shapes
num_unflat_param_shapes = len(unflat_param_shapes)
assert num_unflat_params == num_unflat_param_shapes, \
f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"
# Check if these unflattened parameters have any optimizer state
has_state = [
bool(unflat_param_name in unflat_osd_state)
for unflat_param_name in unflat_param_names
]
# If none of the unflattened parameters comprising this flattened parameter
# have any state, then we do not want an entry in the optimizer state dict
if not any(has_state):
return {} # no need to flatten any state
# There may still be some unflattened parameters with state and some
# without
unflat_param_states = [
unflat_osd_state[unflat_param_name]
if unflat_param_name in unflat_osd_state else None
for unflat_param_name in unflat_param_names
]
# Check that the unflattened parameters have the same state names
state_names = None
for unflat_param_state in unflat_param_states:
if unflat_param_state is None:
continue
if state_names is None:
state_names = set(unflat_param_state.keys())
else:
if state_names != set(unflat_param_state.keys()):
raise ValueError(
"Differing optimizer state names for the unflattened "
f"parameters: {unflat_param_names}"
)
assert state_names is not None
# Flatten the state
flat_state: Dict[str, Any] = {}
for state_name in state_names:
state_values = [
unflat_param_state[state_name]
if unflat_param_state is not None else None
for unflat_param_state in unflat_param_states
]
non_none_state_values = [v for v in state_values if v is not None]
are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
for v in non_none_state_values:
are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
are_zero_dim_tensors &= _is_zero_dim_tensor(v)
are_non_tensors &= not torch.is_tensor(v)
types = set(type(v) for v in non_none_state_values)
if len(types) != 1 or not (
are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
):
raise ValueError(
f"Differing optimizer state types for state {state_name}, "
f"values {non_none_state_values}, and unflattened parameter "
f"names {unflat_param_names}"
)
if are_pos_dim_tensors:
flat_tensor = _flatten_tensor_optim_state(
state_name, state_values, unflat_param_names,
unflat_param_shapes, flat_param,
)
# Shard the flattened tensor immediately to minimize the max memory
# usage
sharded_flat_tensor, _ = fsdp_module._get_shard(flat_tensor)
flat_state[state_name] = sharded_flat_tensor
elif are_zero_dim_tensors:
flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
state_name, state_values, unflat_param_names,
)
else:
assert are_non_tensors
flat_state[state_name] = _flatten_non_tensor_optim_state(
state_name, state_values, unflat_param_names,
)
return flat_state
def _flatten_tensor_optim_state(
state_name: str,
pos_dim_tensors: List[torch.Tensor],
unflat_param_names: List[str],
unflat_param_shapes: List[torch.Size],
flat_param: FlatParameter,
) -> torch.Tensor:
"""
Flattens the positive-dimension tensor optimizer state given by the values
``tensors`` for the state ``state_name`` for a single flattened parameter
``flat_param`` corresponding to the unflattened parameter names
    ``unflat_param_names`` and unflattened parameter shapes
``unflat_param_shapes``. This flattens each unflattened parameter's tensor
state into one tensor.
NOTE: We use zero tensors for any unflattened parameters without state
since some value is required to fill those entries. This assumes that the
zero tensor is mathematically equivalent to having no state, which is true
for Adam's ``exp_avg`` and ``exp_avg_sq`` but may not be true for all
optimizers.
Args:
state_name (str): Optimizer state name.
pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
optimizer state values for the unflattened parameters corresponding
to the single flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
corresponding to the single flattened parameter.
flat_param (FlatParameter): The flattened parameter.
Returns:
flat_tensor (torch.Tensor): A flattened tensor containing the optimizer
state corresponding to ``state_name`` constructed by concatenating
the unflattened parameter tensor states in ``pos_dim_tensors``
(using zero tensors for any unflattened parameters without the
state).
"""
non_none_tensors = [t for t in pos_dim_tensors if t is not None]
# Check that all are tensors with the same dtype
dtypes = set(t.dtype for t in non_none_tensors)
if len(dtypes) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have positive-dimension tensor state with the "
f"same dtype but got dtypes {dtypes} for state {state_name} and "
f"unflattened parameter names {unflat_param_names}"
)
dtype = next(iter(dtypes))
# Check that each tensor state matches its parameter's shape
for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
if tensor is None and len(shape) == 0:
raise ValueError(
"Flattening a zero-dimension parameter is not supported"
)
elif tensor is not None and tensor.shape != shape:
raise ValueError(
"Tensor optimizer state does not have same shape as its "
f"parameter: {tensor.shape} {shape}"
)
# Flatten the tensor states
cpu_device = torch.device("cpu")
tensors = [
torch.flatten(state_value.to(cpu_device)) if state_value is not None
else torch.flatten(torch.zeros(
size=shape, dtype=dtype, device=cpu_device,
))
for state_value, shape
in zip(pos_dim_tensors, unflat_param_shapes)
]
padding = flat_param.num_padded
if padding > 0:
tensors.append(torch.zeros(padding, dtype=dtype, device=cpu_device))
flat_tensor = torch.cat(tensors)
# `flat_tensor`'s shape should be 1D and less than or equal to the
# flattened parameter's shape (where the inequality is strict for positive
# padding)
if not flat_param._is_sharded: # currently, only when world size is 1
# If the parameter is not sharded, then `_full_param_padded` is not
# used, so we skip the shape check
return flat_tensor
full_padded_dim = flat_param._full_param_padded.dim() # type: ignore[attr-defined]
full_padded_shape = flat_param._full_param_padded.shape # type: ignore[attr-defined]
assert flat_tensor.dim() == 1, \
f"`flat_tensor` should be 1D but got {flat_tensor.dim()} dims"
assert full_padded_dim == 1, \
f"`_full_param_padded` should be 1D but got {full_padded_dim} dims"
assert flat_tensor.shape[0] <= full_padded_shape[0], \
f"tensor optim state: {flat_tensor.shape} " \
f"parameter: {full_padded_shape}"
return flat_tensor
def _flatten_zero_dim_tensor_optim_state(
state_name: str,
zero_dim_tensors: List[torch.Tensor],
unflat_param_names: List[str],
) -> torch.Tensor:
"""
Flattens the zero-dimension tensor optimizer state given by the values
``zero_dim_tensors`` for the state ``state_name`` for a single flattened
parameter corresponding to the unflattened parameter names
``unflat_param_names`` by enforcing that all tensors are the same and using
that common value.
NOTE: The requirement that the tensors are the same across all unflattened
parameters comprising the flattened parameter is needed to maintain the
invariant that FSDP performs the same computation as its non-sharded
equivalent. This means that none of the unflattened parameters can be
missing this state since imposing a value may differ from having no value.
For example, for Adam's "step", no value means maximum bias correction,
while having some positive value means less bias correction.
Args:
state_name (str): Optimizer state name.
zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
for the unflattened parameters corresponding to the single
flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
Returns:
zero_dim_tensor (torch.Tensor): A zero-dimensional tensor giving the
value of the state ``state_name`` for all unflattened parameters
corresponding to the names ``unflat_param_names``.
"""
non_none_tensors = [t for t in zero_dim_tensors if t is not None]
# Enforce that all have the same value and dtype
values_set = set(t.item() for t in zero_dim_tensors)
dtypes = set(t.dtype for t in zero_dim_tensors)
if len(non_none_tensors) != len(zero_dim_tensors) or \
len(values_set) != 1 or len(dtypes) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have scalar state with the same value and dtype "
f"but got values {values_set} and dtypes {dtypes} for state "
f"{state_name} and unflattened parameter names "
f"{unflat_param_names}"
)
value = next(iter(values_set))
dtype = next(iter(dtypes))
return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
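# --- Hedged usage sketch (not part of the original module; assumes the module's
# imports resolve). A minimal illustration of the zero-dim flattening contract
# above for three unflattened parameters sharing the same Adam "step" value.
if __name__ == "__main__":
    _steps = [torch.tensor(10), torch.tensor(10), torch.tensor(10)]
    _flat_step = _flatten_zero_dim_tensor_optim_state("step", _steps, ["a", "b", "c"])
    assert _flat_step.item() == 10  # all inputs agree, so the common value is kept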
def _flatten_non_tensor_optim_state(
state_name: str,
non_tensors: List[Any],
unflat_param_names: List[str],
) -> Any:
"""
Flattens the non-tensor optimizer state given by the values ``non_tensors``
for the state ``state_name`` for a single flattened parameter corresponding
to the unflattened parameter names ``unflat_param_names`` by enforcing that
all values are the same and using that common value.
See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
Args:
state_name (str): Optimizer state name.
non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
parameters corresponding to the single flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
Returns:
non_tensor (Any): A non-tensor giving the value of the state
``state_name`` for all unflattened parameters corresponding to the
names ``unflat_param_names``.
"""
non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
# Enforce that all have the same value (same type already checked)
non_tensor_set = set(non_tensors)
if len(non_none_non_tensors) != len(non_tensors) or \
len(non_tensor_set) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have scalar state with the same value and dtype "
f"but got values {non_tensor_set} for state {state_name} and "
f"unflattened parameter names {unflat_param_names}"
)
non_tensor = next(iter(non_tensor_set))
return non_tensor
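# --- Hedged usage sketch (not part of the original module). Non-tensor state,
# e.g. a shared flag, is "flattened" by checking that all unflattened
# parameters agree and returning that common value.
if __name__ == "__main__":
    assert _flatten_non_tensor_optim_state("amsgrad", [False, False], ["a", "b"]) is False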
def _get_flat_param_to_fsdp_module(
model: torch.nn.Module,
):
"""
Constructs a mapping from FSDP flattened parameters to their owning FSDP
modules and ensures that all FSDP modules are initialized.
Args:
        model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance).
"""
flat_param_to_fsdp_module = {}
for module in model.modules():
if isinstance(module, FSDP.FullyShardedDataParallel):
module._lazy_init()
for param in module.params: # may have none
flat_param_to_fsdp_module[param] = module
return flat_param_to_fsdp_module
def _get_param_id_to_param(
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> List[torch.nn.Parameter]:
"""
Constructs a mapping from parameter IDs to parameters. This may be used
both for models with ``FlatParameter`` s and without.
NOTE: We critically assume that, whether the optimizer input is a list of
parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
enumerates the parameter IDs in order. In other words, for a parameter list
input, the parameter IDs should be in that list order, and for a parameter
groups input, the parameter IDs should be in order within each parameter
group and in order across parameter groups.
Args:
model (torch.nn.Module): Model whose parameters are passed into the
optimizer.
optim_input (Optional[Union[List[Dict[str, Any]],
Iterable[torch.nn.Parameter]]]): Input passed into the optimizer
representing either a :class:`list` of parameter groups or an
iterable of parameters; if ``None``, then this method assumes the
input was ``model.parameters()``. (Default: ``None``)
Returns:
param_id_to_param (List[torch.nn.Parameter]): Mapping from parameter
IDs to parameters, where the parameter ID is implicitly the index
in the :class:`list`.
"""
# Assume the standard case of passing `model.parameters()` to the optimizer
# if `optim_input` is not specified
if optim_input is None:
return list(model.parameters())
try:
params = list(optim_input)
except TypeError:
raise TypeError(
"Optimizer input should be an iterable of Tensors or dicts, "
f"but got {optim_input}"
)
if len(params) == 0:
raise ValueError("Optimizer input should not be empty")
# Check if the optimizer input represents tensors or parameter groups
all_tensors = True
all_dicts = True
for param in params:
all_tensors &= isinstance(param, torch.Tensor)
all_dicts &= isinstance(param, dict)
if not all_tensors and not all_dicts:
raise TypeError(
"Optimizer input should be an iterable of Tensors or dicts"
)
if all_tensors:
return params # type: ignore[return-value]
assert all_dicts
param_id_to_param = []
for param_group in params:
has_params_key = "params" in param_group # type: ignore[operator]
assert has_params_key, \
"A parameter group should map \"params\" to a list of the " \
"parameters in the group"
for param in param_group["params"]: # type: ignore[index]
# Implicitly map `flat_param_id` (current length of the list) to
# `param`
param_id_to_param.append(param)
return param_id_to_param # type: ignore[return-value]
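# --- Hedged usage sketch (not part of the original module). Illustrates the
# parameter-ID ordering assumption above with a plain (non-FSDP) model and a
# parameter-group optimizer input.
if __name__ == "__main__":
    _lin = torch.nn.Linear(2, 2)
    _groups = [{"params": [_lin.weight]}, {"params": [_lin.bias]}]
    _id_to_param = _get_param_id_to_param(_lin, _groups)
    assert _id_to_param[0] is _lin.weight and _id_to_param[1] is _lin.bias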
def _get_param_to_param_id(
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[torch.nn.Parameter, int]:
"""Constructs the inverse mapping of :func:`_get_param_id_to_param`."""
param_id_to_param = _get_param_id_to_param(model, optim_input)
return {
param: param_id for param_id, param in enumerate(param_id_to_param)
}
def _get_unflat_to_flat_param_ids(
flat_to_unflat_param_ids: Dict[int, List[int]],
) -> List[int]:
"""
Inverts the mapping ``flat_to_unflat_param_ids`` to be from unflattened
parameter ID to flattened parameter ID, where the unflattened parameter ID
is the index in the returned :class:`list`. There may be multiple
unflattened parameter IDs mapping to the same flattened parameter ID.
Args:
flat_to_unflat_param_ids (Dict[int, List[int]]): A mapping from
flattened parameter ID to a :class:`list` of corresponding
unflattened parameter IDs.
Returns:
unflat_to_flat_param_ids (List[int]): A mapping from unflattened
parameter ID to flattened parameter ID, where the unflattened
parameter ID is the index in the :class:`list`.
"""
# Construct as a dict and then convert to list
unflat_to_flat_param_ids = {}
for flat_param_id, unflat_param_ids in flat_to_unflat_param_ids.items():
for unflat_param_id in unflat_param_ids:
assert unflat_param_id not in unflat_to_flat_param_ids, \
"`flat_to_unflat_param_ids` has the unflattened parameter " \
f"ID {unflat_param_id} mapped to multiple flattened " \
"parameter IDs"
unflat_to_flat_param_ids[unflat_param_id] = flat_param_id
num_unflat_param_ids = len(unflat_to_flat_param_ids)
unflat_param_ids_set = set(unflat_to_flat_param_ids.keys())
assert unflat_param_ids_set == set(range(num_unflat_param_ids)), \
"The set of unflattened parameter IDs should be {0, ..., " + \
str(num_unflat_param_ids - 1) + "} but got " + \
f"{unflat_param_ids_set}"
return [
unflat_to_flat_param_ids[unflat_param_id]
for unflat_param_id in range(num_unflat_param_ids)
]
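# --- Hedged usage sketch (not part of the original module). For example, if
# flattened parameter 0 owns unflattened parameters 0 and 1 while flattened
# parameter 1 owns unflattened parameter 2, the inverse mapping is [0, 0, 1].
if __name__ == "__main__":
    assert _get_unflat_to_flat_param_ids({0: [0, 1], 1: [2]}) == [0, 0, 1]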
def _is_zero_dim_tensor(x: Any) -> bool:
return torch.is_tensor(x) and x.dim() == 0
|
the-stack_0_26965
|
# -*- coding: utf-8 -*-
import unetsl.data
import numpy
def upsample(stack, shape):
"""
upsamples the last three dimensions and keeps the other dimensions
unchanged.
"""
scale = [o//i for o,i in zip(shape[-3:], stack.shape[-3:])]
return numpy.kron(stack, numpy.ones(tuple(scale)))
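# Hedged sketch (not part of the original module): each voxel of a
# (1, 1, 2, 2, 2) stack is repeated 2x along z, y and x to reach (..., 4, 4, 4).
if __name__ == "__main__":
    _up = upsample(numpy.ones((1, 1, 2, 2, 2)), (1, 1, 4, 4, 4))
    assert _up.shape == (1, 1, 4, 4, 4) and _up.sum() == 4 * 4 * 4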
def createSliceTuple(origin, size):
ret = []
for i,j in zip(origin, size):
ret.append(slice(i, j+i))
return tuple(ret)
MODEL_KEY = "model file"
IMG_KEY = "image to predict"
OUT_KEY = "output image"
REDUCTION_TYPE = "reduction type"
DEBUG = "debug"
OUTPUT_INDEX = "output index"
LAYER_SHAPER = "layer shaper"
CATEGORICAL_REDUCTION = 0
MULTICLASS_REDUCTION = 1
LINEAR_REDUCTION = 2
def cropShaper(batch, target_shape):
"""
    The target shape should be the shape of the destination image.
A batch should be (N, C, Z, Y, X) and target_shape should be
(N, C, Z*, Y*, X*)
Args:
batch (numpy.array): data to be reshaped in the space dimensions.
target_shape ( [int, ...]): target shape, only last 3 dimensions are used.
Return:
A zero padded version of batch.
"""
#possibly a list.
target_shape = tuple(target_shape[-3:])
#total dimensions - should be 5
dims = len(batch.shape)
#patch dimensions, currently 3.
pls = len(target_shape)
fill = dims - pls
view = batch.shape[-pls:]
offset = tuple( (ti - vi)//2 for vi, ti in zip(view, target_shape) )
lows = (0, )*fill + offset
span = batch.shape[:fill] + view
highs = tuple(l + s for l, s in zip(lows, span))
slc = tuple(slice(l, h) for l, h in zip(lows, highs) )
shape = batch.shape[ : fill ] + target_shape
out = numpy.zeros(shape, dtype=batch.dtype)
out[slc] = batch
return out
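# Hedged sketch (not part of the original module): a (2, 1, 4, 4, 4) batch is
# centered inside a zero-padded (2, 1, 6, 6, 6) output without changing its sum.
if __name__ == "__main__":
    _batch = numpy.ones((2, 1, 4, 4, 4))
    _padded = cropShaper(_batch, (2, 1, 6, 6, 6))
    assert _padded.shape == (2, 1, 6, 6, 6) and _padded.sum() == _batch.sum()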
def getShaper(shaper_name):
"""
cannot know the shaper without knowing the head!
"""
if shaper_name=="upsample":
return upsample
if shaper_name == "crop":
return cropShaper
class LayerShaper:
def __init__(self):
pass
    def __call__(self, stack, shape):
        """
        shape is the desired output shape and stack is a batch of data with
        expected shape (n, c, z, y, x).
"""
pass
DEFAULT_REDUCTION_TYPES = {
"Sigmoid" : MULTICLASS_REDUCTION,
"Softmax" : CATEGORICAL_REDUCTION,
"Relu" : LINEAR_REDUCTION
}
def guessReduction(output):
try:
return DEFAULT_REDUCTION_TYPES[output.op.type]
except:
pass
return MULTICLASS_REDUCTION
def guessOutputReductionTypes(model):
guessed_types = {}
om = unetsl.model.getOutputMap(model)
for key in om:
guessed_types[key] = guessReduction(om[key])
print("guessed reductions")
return tuple( guessed_types[key] for key in guessed_types)
class MultiChannelPredictor:
def __init__(self, model, image, reduction_types =[ ], stride=None, sample_normalize=False, batch_size=2, debug=False, GPUS=1, layer_shapers=[upsample,]):
self.model = model
self.image = image
self.reduction_types = reduction_types
self.stride = stride
self.sample_normalize = sample_normalize
self.batch_size = batch_size
self.debug = debug
self.GPUS = GPUS
self.layer_shapers=layer_shapers
def predict(self):
return self.predictImage(self.image);
def predictImage(self, image):
if len(self.reduction_types) < 1:
self.reduction_types = guessOutputReductionTypes(self.model)
return predictMultiChannelImage(
self.model,
image,
reduction_type = self.reduction_types,
stride = self.stride,
sample_normalize = self.sample_normalize,
batch_size = self.batch_size,
debug = self.debug,
GPUS = self.GPUS,
shapers = self.layer_shapers
)
def predictionToLabelledImage(prediction, reduction_type, labelled_shape):
"""
    Receives a prediction (labels, z, y, x) and returns a labelled image of
    shape (1, z, y, x) together with its probabilities.
"""
if reduction_type==CATEGORICAL_REDUCTION:
probabilities = numpy.max(prediction, axis=0)
dexes = numpy.argmax(prediction, axis=0)
labelled = 1<<numpy.array(dexes, dtype='uint8')
elif reduction_type==MULTICLASS_REDUCTION:
labelled = numpy.zeros(labelled_shape, dtype='uint8')
for label in range(prediction.shape[0]):
patch = numpy.array((prediction[label]>0.5)*(1<<label), dtype='uint16')
labelled |= patch
probabilities = numpy.max(prediction, axis=0, keepdims=True)
elif reduction_type==LINEAR_REDUCTION:
labelled = numpy.array((prediction), dtype='uint16')
probabilities = None
return labelled, probabilities
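# Hedged sketch (not part of the original module): with the multiclass
# reduction, every channel whose probability exceeds 0.5 contributes its bit
# (1 << channel) to the label value.
if __name__ == "__main__":
    _pred = numpy.array([[[[0.9]]], [[[0.1]]], [[[0.8]]]])  # (labels, z, y, x)
    _labels, _probs = predictionToLabelledImage(_pred, MULTICLASS_REDUCTION, (1, 1, 1, 1))
    assert int(_labels[0, 0, 0, 0]) == (1 << 0) | (1 << 2)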
def generateProbabilityWindow(input_shape, labelled_shape, stride):
body = 1.0
face = 0.75
edge = 0.5
corner = 0.25
window = numpy.zeros(labelled_shape, dtype='float32') + edge
r0 = (input_shape[1:] - stride)//2
important = createSliceTuple(r0, stride)
window[0][important] = body
#corner at origin
ox = 0
oy = 0
oz = 0
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
oy = r0[1] + stride[1]
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
oz = r0[0] + stride[0]
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
oy = 0
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
ox = r0[2] + stride[2]
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
oz = 0
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
oy = r0[1] + stride[1]
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
oz = r0[0] + stride[0]
window[0][ createSliceTuple( (ox, oy, oz ), r0 ) ] = corner
#xfaces
xface_shape = (stride[0], stride[1], r0[2])
window[0][ createSliceTuple( (r0[0], r0[1], 0), xface_shape ) ] = face
window[0][ createSliceTuple( (r0[0], r0[1], stride[2] + r0[2]), xface_shape ) ] = face
#yfaces
yface_shape = (stride[0], r0[1], stride[2])
window[0][ createSliceTuple((r0[0], 0, r0[2]), yface_shape)] = face
window[0][ createSliceTuple( (r0[0], stride[1] + r0[1], r0[2]), yface_shape )] = face
#zfaces
zface_shape = (r0[0], stride[1], stride[2])
window[0][ createSliceTuple( (0, r0[1], r0[2]), zface_shape ) ] = face
window[0][ createSliceTuple( (stride[0] + r0[0], r0[1], r0[2]), zface_shape ) ] = face
return window
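# Hedged sketch (not part of the original module): for a toy (1, 8, 8, 8) input
# with stride (4, 4, 4), the blending window is 1.0 in the central body and
# 0.25 at the corners.
if __name__ == "__main__":
    _win = generateProbabilityWindow(
        numpy.array([1, 8, 8, 8]), (1, 8, 8, 8), numpy.array([4, 4, 4])
    )
    assert _win[0, 4, 4, 4] == 1.0 and _win[0, 0, 0, 0] == 0.25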
def predictImage(model, image, reduction_type = MULTICLASS_REDUCTION, stride=None, output_index=-1, sample_normalize=False, batch_size=2, debug=False, GPUS=1, shaper=upsample):
#input shape is (c, z, y, x)
input_shape = unetsl.model.getInputShape(model)
#TODO fix this.
image = unetsl.data.splitIntoChannels(input_shape, image)
#spatial dimensions
patch_shape = numpy.array(input_shape[1:])
if stride is None:
stride = patch_shape//2
for i, s in enumerate(stride):
if s==0:
stride[i] = 1
else:
stride=numpy.array(stride)
#single channel output, input shape spatial dimension.
out_shape = [1] + [s for s in image.shape[-3:]]
labelled_shape = [1] + list(patch_shape)
slices = []
chunks = []
for k in unetsl.data.fullRange(image.shape[-3], input_shape[-3], stride[-3]):
for j in unetsl.data.fullRange(image.shape[-2], input_shape[-2], stride[-2]):
for i in unetsl.data.fullRange(image.shape[-1], input_shape[-1], stride[-1]):
slc = createSliceTuple((0, k, j, i), input_shape)
slices.append(slc)
while len(slices)%GPUS != 0:
slices.append(slices[0])
if batch_size < 0:
batch_size = len(slices)
else:
if batch_size < GPUS:
batch_size=GPUS
#TODO improve the prediction window to not overwrite more edge-cases.
window = generateProbabilityWindow(input_shape, labelled_shape, stride)
out_stack = []
debug_out_stack = []
nslices = len(slices)
nframes = image.shape[0]
for n_frame, frame in enumerate(image):
out = numpy.zeros(out_shape, dtype="uint8")
debug_out = numpy.zeros(out_shape, dtype="float32")
for j in range(0, len(slices), batch_size):
to_send = batch_size
if len(slices)<j + batch_size:
to_send = len(slices) - j
s_slices = slices[j:j+to_send]
chunks = numpy.array(
[ frame[slc] for slc in s_slices ],
dtype="uint16"
)
if sample_normalize:
chunks = unetsl.data.normalizeImages(chunks)
predictions = model.predict(chunks)
print(j + n_frame*nslices, " of ", nslices*nframes)
if isinstance(predictions, list):
predictions = shaper(predictions[output_index], patch_shape)
for (slc, prediction) in zip(s_slices, predictions):
labels, probs = predictionToLabelledImage(prediction, reduction_type, labelled_shape)
if reduction_type==CATEGORICAL_REDUCTION:
#TODO include the window for probability comparisons.
org = out[slc]
old = debug_out[slc]
imp = (probs>old)*1
nimp = 1-imp
upd = (probs==old)*labels
debug_out[slc] =probs*imp + old*nimp
out[slc] = (nimp)*(org | upd ) + labels*imp
elif reduction_type==MULTICLASS_REDUCTION:
original_probs = debug_out[slc]
improving = numpy.where(window>original_probs)
updating = numpy.where(window==original_probs)
out[slc][improving] = labels[improving]
out[slc][updating] |= labels[updating]
debug_out[slc][improving] = window[improving]
elif reduction_type==LINEAR_REDUCTION:
original_probs = debug_out[slc]
improving = numpy.where(window>original_probs)
updating = numpy.where(window==original_probs)
out[slc][improving] = labels[improving]
out[slc][updating] = (labels[updating] + out[slc][updating])/2
debug_out[slc][improving] = window[improving]
out_stack.append(out)
debug_out_stack.append(debug_out)
return numpy.array(out_stack), numpy.array(debug_out_stack)
def predictMultiChannelImage(model, image, reduction_type =[], stride=None, output_index=None, sample_normalize=False, batch_size=2, debug=False, GPUS=1, shapers=[upsample, ]):
"""
    Supports multi-channel predictions where each channel denotes an output.
    Each channel then has an associated reduction type and shaper.
    If not supplied, the reduction type is inferred from the op name of the
    output tensor. The shaper operation defaults to "upsample" because there is
    no way to infer the re-sizing operation from the information in the model;
    the shaper type could eventually be encoded in the output name.
    output_index must index into reduction_type and shapers, e.g. if
    reduction_type and shapers are dictionaries the output index should be keys
    (probably strings), and if they are lists the output index should be valid
    integers.
Args:
model ( tf.Model ):
image ( numpy.array ):
reduction_type ():
stride ()
output_index
sample_normalize
batch_size
debug
GPUS
shapers
Return:
numpy array, numpy array:
The first image is the prediction, after processing. The second image
is a debug image, if applicable.
"""
reduction_types = reduction_type
#input shape is (c, z, y, x)
input_shape = unetsl.model.getInputShape(model)
image = unetsl.data.splitIntoChannels(input_shape, image)
print("predicting with shape (n, c, z, y, x) :: ", image.shape)
patch_shape = numpy.array(input_shape[1:])
if stride is None:
stride = patch_shape//2
for i, s in enumerate(stride):
if s==0:
stride[i] = 1
else:
stride=numpy.array(stride)
outputs = unetsl.model.getOutputMap(model)
if output_index is None:
output_index = [i for i in range(len(outputs))]
noc = len(output_index)
if len(reduction_type) == 0:
reduction_types = guessOutputReductionTypes(model)
if len(reduction_type)==noc:
reduction_types = reduction_type
elif len(reduction_type)==1:
reduction_types = noc*reduction_type
#output channels / spatial dimensions
out_shape = [noc] + [s for s in image.shape[-3:]]
labelled_shape = [1] + list(patch_shape)
slices = []
for k in unetsl.data.fullRange(image.shape[-3], input_shape[-3], stride[-3]):
for j in unetsl.data.fullRange(image.shape[-2], input_shape[-2], stride[-2]):
for i in unetsl.data.fullRange(image.shape[-1], input_shape[-1], stride[-1]):
slc = createSliceTuple((0, k, j, i), input_shape)
slices.append(slc)
while len(slices)%GPUS != 0:
slices.append(slices[0])
if batch_size < 0:
batch_size = len(slices)
else:
if batch_size < GPUS:
batch_size=GPUS
#TODO improve the prediction window to not overwrite more edge-cases.
window = generateProbabilityWindow(input_shape, labelled_shape, stride)
full_out_stack = []
debug_out_stack = []
for frame in image:
full_out = numpy.zeros(out_shape, dtype="uint8")
full_debug_out = numpy.zeros(out_shape, dtype="float32")
for j in range(0, len(slices), batch_size):
to_send = batch_size
if len(slices)<j + batch_size:
to_send = len(slices) - j
s_slices = slices[j:j+to_send]
chunks = numpy.array(
[ frame[slc] for slc in s_slices ],
dtype="uint16"
)
if sample_normalize:
chunks = unetsl.data.normalizeImages(chunks)
predictions = model.predict(chunks)
for index, oi in enumerate(output_index):
predictions[index] = shapers[oi](
predictions[index],
labelled_shape )
for ch, pred in enumerate(predictions):
out = full_out[ch:ch+1]
debug_out = full_debug_out[ch:ch+1]
reduction_type = reduction_types[ch]
for (slc, prediction) in zip(s_slices, pred):
labels, probs = predictionToLabelledImage(prediction, reduction_type, labelled_shape)
if reduction_type==CATEGORICAL_REDUCTION:
#TODO include the window for probability comparisons.
probs = probs*window
org = out[slc]
old = debug_out[slc]
imp = (probs>old)*1
nimp = 1-imp
upd = (probs==old)*labels
debug_out[slc] =probs*imp + old*nimp
out[slc] = (nimp)*(org | upd ) + labels*imp
elif reduction_type==MULTICLASS_REDUCTION:
original_probs = debug_out[slc]
improving = numpy.where(window>original_probs)
updating = numpy.where(window==original_probs)
out[slc][improving] = labels[improving]
out[slc][updating] |= labels[updating]
debug_out[slc][improving] = window[improving]
elif reduction_type==LINEAR_REDUCTION:
original_probs = debug_out[slc]
improving = numpy.where(window>original_probs)
updating = numpy.where(window==original_probs)
out[slc][improving] = labels[improving]
out[slc][updating] = (labels[updating] + out[slc][updating])/2
debug_out[slc][improving] = window[improving]
print( ( j+ batch_size), "completed of:", len(slices) )
debug_out_stack.append(debug_out)
full_out_stack.append(full_out);
return numpy.array(full_out_stack), numpy.array(debug_out_stack)
|
the-stack_0_26966
|
#This contains settings accessible to the whole project
#import other settings
from .local_settings import *
USSD={
'code':'421',
    'sub_code':'22', #if there is no sub_code, set it to None
'endpoint':'http://127.0.0.1:9000/ussd'
}
|
the-stack_0_26970
|
#!/usr/bin/env python
# coding: utf-8
# ## Importing Required libraries
# In[271]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split,RandomizedSearchCV
from sklearn.metrics import mean_squared_error
from math import sqrt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# In[272]:
# Loading the dataset
data = pd.read_csv("day.csv")
# In[273]:
#Checking the dimensions of dataset
data.shape
# In[274]:
data.head()
# In[275]:
#Initial insight of dataset
data.describe()
# In[276]:
data.dtypes
# In[277]:
## Removing unnecessary variables from dataset
# instant - It is basically an index number
# dteday - All the values from dteday are present in the dataset under different variables
# casual and registered - cnt is basically the sum of the casual and registered variables
data = data.drop(['instant','dteday','casual','registered'],axis=1)
# In[278]:
#Creating a copy of original dataset
data_vis = data.copy()
data.head()
# In[279]:
##Converting the integer values into proper naming
data_vis['season'] = data_vis['season'].replace([1,2,3,4],['Springer','summer','fall','winter'])
data_vis['yr'] = data_vis['yr'].replace([0,1],[2011,2012])
data_vis['weathersit'] = data_vis['weathersit'].replace([1,2,3,4],[' Clear+Few clouds+Partly cloudy','Mist + Cloudy, Mist + Broken clouds, ',' Light Snow, Light Rain + Thunderstorm ','Heavy Rain + Ice Pallets '])
data_vis['holiday'] = data_vis['holiday'].replace([0,1],['working Day','Holiday'])
data_vis.head()
# In[280]:
print(data.dtypes)
print(data.head())
# ## Univarient Analysis
# In[281]:
## Bar Graph for Categorical data
sns.set_style("whitegrid")
sns.factorplot(data=data_vis,x='season',kind='count',size=4,aspect=2)
sns.factorplot(data=data_vis,x='yr',kind='count',size=4,aspect=2)
sns.factorplot(data=data_vis,x='mnth',kind='count',size=4,aspect=2)
sns.factorplot(data=data_vis,x='holiday',kind='count',size=4,aspect=2)
sns.factorplot(data=data_vis,x='workingday',kind='count',size=4,aspect=2)
sns.factorplot(data=data_vis,x='weathersit',kind='count',size=4,aspect=2)
# In[282]:
plt.hist(data_vis['temp'],bins=30)
plt.xlabel('temp')
plt.ylabel('Frequency')
plt.show()
# In[283]:
plt.hist(data_vis['atemp'],bins=30)
plt.xlabel('atemp')
plt.ylabel('Frequency')
plt.show()
# In[284]:
plt.hist(data_vis['hum'],bins=30)
plt.xlabel('humidity')
plt.ylabel('Frequency')
plt.show()
# In[285]:
plt.hist(data_vis['windspeed'],bins=30)
plt.xlabel('WindSpeed')
plt.ylabel('Frequency')
plt.show()
# In[286]:
#Dimensions of dataset
data.shape
# ## Bivariant Analysis
# In[287]:
## Using Scatter Plot
# Index(['instant', 'season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday',
# 'weathersit', 'temp', 'atemp', 'hum', 'windspeed', 'casual',
# 'registered', 'cnt'],
# dtype='object')
# In[288]:
fig,x = plt.subplots(nrows= 2,ncols=2)
fig.set_size_inches(12,15)
sns.scatterplot(x="temp",y = "cnt",data = data_vis,palette="Set3",ax=x[0][0])
sns.scatterplot(x="atemp",y = "cnt",data = data_vis,palette="Set3",ax=x[0][1])
sns.scatterplot(x="hum",y = "cnt",data = data_vis,palette="Set3",ax=x[1][0])
sns.scatterplot(x="windspeed",y = "cnt",data = data_vis,palette="Set3",ax=x[1][1])
# ## Outlier Analysis
# In[289]:
## Checking the presence of outlier in continous variables
sns.boxplot(data = data[['temp','atemp','windspeed','hum']])
fig = plt.gcf()
fig.set_size_inches(8,8)
# In[290]:
## Removing outlier and checking correlation between target variable and independent continous variables
print(data.shape)
print(data['hum'].corr(data['cnt']))
print(data['windspeed'].corr(data['cnt']))
q75, q25 = np.percentile(data.loc[:,'hum'],[75,25])
iqr = q75 - q25
min = q25-(iqr*1.5)
max = q75+(iqr*1.5)
print(min)
print(max)
data = data.drop(data[data.loc[:,'hum']<min].index)
data = data.drop(data[data.loc[:,'hum']>max].index)
q75, q25 = np.percentile(data.loc[:,'windspeed'],[75,25])
iqr = q75 - q25
min = q25-(iqr*1.5)
max = q75+(iqr*1.5)
print(min)
print(max)
data = data.drop(data[data.loc[:,'windspeed']<min].index)
data = data.drop(data[data.loc[:,'windspeed']>max].index)
# ## Missing Value Analysis
# In[291]:
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(30)
# There are no missing values present after outlier analysis
# ## Feature Selection
# In[292]:
def Correlation(df):
df_corr = df.loc[:,df.columns]
corr = df_corr.corr()
sns.set()
plt.figure(figsize=(10,10))
sns.heatmap(corr,annot=True,fmt=".3f",square=True,linewidths=0.5)
Correlation(data)
# In[293]:
## There is a high correlation between the temp and atemp variables
## There is a very weak relation between the holiday, weekday and workingday variables
## So we will drop those variables
data_fs = data.drop(['atemp','holiday','weekday','workingday'],axis=1)
data_fs.head()
# In[294]:
# Splitting Dataset into train and test dataset
train,test = train_test_split(data_fs,test_size=0.2,random_state=121)
# ## Feature Scaling
# In[295]:
## Data is already normalized; no need to do feature scaling
train.head()
# ## Error Metrics
# In[296]:
## Defining Performance Metrics
def MAPE(y_true, y_pred):
MAE = np.mean(np.abs((y_true - y_pred)))
mape = np.mean(np.abs((y_true - y_pred) / y_true))
print("MAE is:", MAE)
print("MAPE is:", mape)
return mape
def RMSE(y_true, y_pred):
mse = np.mean((y_true - y_pred)**2)
rmse = np.sqrt(mse)
print("MSE: ",mse)
print("RMSE: ",rmse)
return rmse
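# In[ ]:
# A quick hedged sanity check of the metric helpers above on toy arrays
# (illustration only, not part of the modelling pipeline).
_y_true = np.array([100.0, 200.0])
_y_pred = np.array([110.0, 190.0])
assert round(MAPE(_y_true, _y_pred), 3) == 0.075
assert round(RMSE(_y_true, _y_pred), 3) == 10.0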
# ## Linear Regression
# In[297]:
LR_model = sm.OLS(train.iloc[:,7],train.iloc[:,0:6]).fit()
#Summary
print(LR_model.summary())
#Predict
LR_Model_predict = LR_model.predict(test.iloc[:,0:6])
# In[298]:
MAPE(test.iloc[:,7],LR_Model_predict)
RMSE(test.iloc[:,7],LR_Model_predict)
# In[299]:
result = pd.DataFrame({'Actual Value':test.iloc[:,7],'Linear Regression':LR_Model_predict})
result.head()
# ## Decision Tree
# In[300]:
DT_model = DecisionTreeRegressor(random_state=100).fit(train.iloc[:,0:6],train.iloc[:,7])
#prediction
DT_model_predict = DT_model.predict(test.iloc[:,0:6])
# In[ ]:
# In[301]:
MAPE(test.iloc[:,7],DT_model_predict)
RMSE(test.iloc[:,7],DT_model_predict)
# In[302]:
result['Decision Tree'] = DT_model_predict
result.head()
# ## Random Forest
# In[303]:
RF_model = RandomForestRegressor(random_state=123)
np.random.seed(10)
arg_dict = {'max_depth':[2,4,6,8,10],
'bootstrap':[True,False],
'max_features':['auto','sqrt','log2',None],
'n_estimators':[100,200,300,400,500]}
gs_randomForest = RandomizedSearchCV(RF_model,cv=10,param_distributions=arg_dict,
n_iter=10)
gs_randomForest.fit(train.iloc[:,0:6],train.iloc[:,7])
print("Best Parameters using random Search",
gs_randomForest.best_params_)
# In[304]:
RF_model.set_params(n_estimators = 500,
max_features='sqrt',
max_depth=8,
bootstrap=True)
RF_model.fit(train.iloc[:,0:6],train.iloc[:,7])
RF_model_predict = RF_model.predict(test.iloc[:,0:6])
# In[305]:
MAPE(test.iloc[:,7],RF_model_predict)
RMSE(test.iloc[:,7],RF_model_predict)
# In[306]:
result['Random Forest'] = RF_model_predict
result.head()
# From the above models, Random Forest performs best according to the RMSE values
# In[307]:
#Saving the result of test data onto local machine
result.to_csv("Test_Result_python.csv",index=False)
# In[ ]:
|
the-stack_0_26971
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 17:43:57 2017
@author: robert
"""
import sys
if sys.version_info > (3, 3):
from pymysql import cursors
else:
from MySQLdb import cursors
from . import mono_tools
import logging
RECORDING = 0 #true if we want scapy magic
DECRYPTING = 0 #true if we want MITM magic
ID_CURRENT_SESSION = 0 #current id_session
logger = logging.getLogger("mono_param")
#Deprecated
def set_recording(recording, db):
set_int_param("recording", recording, db)
logging.getLogger("mono_param").debug("recording set to %s" % (recording))
def get_recording(db):
return get_int_param("recording", db)
def set_decrypting(decrypting, db):
set_int_param("decrypting", decrypting, db)
logging.getLogger("mono_param").debug("decrypting set to %s" % (decrypting))
def get_decrypting(db):
return get_int_param("decrypting", db)
def set_id_session(id_session, db):
set_int_param("id_session", id_session, db)
logging.getLogger("mono_param").debug("id_session set to %s" % (id_session))
def get_id_session(db):
return get_int_param("id_session", db)
#set an integer parameter
def set_int_param(param_name, param_value, db):
try:
cursor = db.cursor()
sql = "UPDATE PARAMS SET value_int = %s WHERE name=%s"
# Execute the SQL command
cursor.execute(sql, (param_value, param_name))
db.commit()
except Exception as e:
mono_tools.handle_db_exception(e, db, cursor)
raise
#set a string parameter
def set_str_param(param_name, param_value, db):
try:
cursor = db.cursor()
sql = "UPDATE PARAMS SET value_str = %s WHERE name=%s"
# Execute the SQL command
cursor.execute(sql, (param_value, param_name))
db.commit()
except Exception as e:
mono_tools.handle_db_exception(e, db, cursor)
raise
#get an integer parameter
def get_int_param(param_name, db):
try:
cursor = db.cursor(cursors.DictCursor)
sql = "SELECT value_int FROM PARAMS WHERE name=%s"
# Execute the SQL command
cursor.execute(sql, (param_name,))
db.commit()
param = cursor.fetchone()
return param["value_int"]
except Exception as e:
mono_tools.handle_db_exception(e, db, cursor)
raise
#get a string parameter
def get_str_param(param_name, db):
try:
cursor = db.cursor(cursors.DictCursor)
sql = "SELECT value_str FROM PARAMS WHERE name=%s"
# Execute the SQL command
cursor.execute(sql,(param_name,))
db.commit()
param = cursor.fetchone()
return param["value_str"]
except Exception as e:
mono_tools.handle_db_exception(e, db, cursor)
raise
|
the-stack_0_26974
|
import csv
import cv2
if __name__ == "__main__":
file = open("dataCSV.csv", 'w')
image = cv2.imread("pylogo.jpg", 1)
file.write("X,Y,R,G,B\n")
for y in range(0, image.shape[0]):
for x in range(0, image.shape[1]):
line = str(x)+","+str(y)+","+str(image[y,x][2])+","+str(image[y,x][1])+","+str(image[y,x][0])+"\n"
file.write(line)
file.close()
|
the-stack_0_26975
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import base64
from math import log
import ressources.config as config
def bytes_to_int(b: bytes):
"""Take bytes as input and return associated integer."""
return int().from_bytes(b, "big")
def int_to_bits(i: int):
"""Take an integer as input and return the bits written version."""
return "{:0{}b}".format(i, i.bit_length())
def isBase64(sb):
"""
Check if both string and bytes objects are in base64.
"""
try:
if isinstance(sb, str):
# If there's any unicode here, an exception will be thrown and the function will return false
sb_bytes = bytes(sb, "ascii")
elif isinstance(sb, bytes):
sb_bytes = sb
else:
raise ValueError("Argument must be string or bytes")
return base64.b64encode(base64.b64decode(sb_bytes)) == sb_bytes
except Exception:
return False
def bytes_needed(n: int):
"""Return BYTES needed to encode an integer."""
if n == 0:
return 1
return int(log(n, 256)) + 1
def mult_to_bytes(obj: object) -> bytes:
"""Convert given {array of bits, bytes, int, str, b64} to bytes"""
if isinstance(obj, list):
i = int("".join(["{:01b}".format(x) for x in obj]), 2)
res = i.to_bytes(bytes_needed(i), byteorder="big")
elif isinstance(obj, int):
res = obj.to_bytes(bytes_needed(obj), "big")
elif isBase64(obj):
res = base64.b64decode(obj)
elif isinstance(obj, bytes):
res = obj
elif isinstance(obj, str):
alphabet = max([int(c) for c in obj]) + 1
res = int(obj, alphabet)
return mult_to_bytes(res)
else:
res = bytes(obj)
return res
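# Hedged examples (not part of the original module; assume the module's own
# imports resolve) showing the byte-conversion helpers above.
if __name__ == "__main__":
    assert bytes_to_int(b"\x01\x00") == 256
    assert bytes_needed(256) == 2
    assert mult_to_bytes(256) == b"\x01\x00"
    assert mult_to_bytes([1, 0, 0, 0, 0, 0, 0, 0, 0]) == b"\x01\x00"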
def swapPos(toSwap, pos1, pos2):
toSwap[pos1], toSwap[pos2] = toSwap[pos2], toSwap[pos1]
return toSwap
def fileToBytes(file, message=True, directory=config.DIRECTORY_PROCESSING, Verbose=False):
"""
    Read a file and convert it to a bytearray.
    message: True if it's a .txt file containing a message.
"""
if Verbose:
print(f"Opening the {file} file.")
readTime = time.time()
with open(os.path.join(directory, file), "rb") as f:
data = bytearray(f.read())
config.WATCH_READ_TIME = time.time() - readTime
if not message: # If it's a file
        if len(data) < 1024:  # At least a few kilobytes
            raise Exception("The input file must be at least a few kilobytes.")
return data
###################### - File Manager
def codeOut(thing, coded=True, inFile=""):
"""
Choose what to do with the text (ciphered or not) and deal with it.
thing: Array of bytesArrays
"""
# Pack and remove null bytes added by z_filling.
if not coded:
thing[-1] = thing[-1].replace(b"\x00", b"")
packed = packSplittedBytes(thing)
if inFile:
wTime = time.time()
if coded:
# Let's write byte per byte into a .kat file
katFile = open(inFile + ".kat", "wb")
katFile.write(bytes(packed))
else:
katFile = open(
os.path.join(os.path.dirname(inFile), f"dec_{os.path.basename(inFile)[:-4]}"),
"wb",
)
katFile.write(bytes(packed))
katFile.close()
config.WATCH_WRITE_TIME = time.time() - wTime
config.WATCH_EXEC_STATUS = False
return ""
else:
if coded:
return base64.b64encode(packed).decode()
try:
decoded = packed.decode()
return decoded
except UnicodeDecodeError:
raise UnicodeDecodeError(
"Unable to decode the message, the decryption method does not match the encryption method, the wrong key is used or the encrypted message has been corrupted.\n"
)
def zfill_b(byteA, n: int):
"""
Fill byte till length n.
Output: bytes
"""
if not isinstance(byteA, bytearray):
byteA = bytearray(byteA)
while n > len(byteA):
byteA.insert(0, 0)
return bytes(byteA)
def b_op(b1, b2, ope="XOR"):
"""
Bitwise operation between two bytes (XOR, AND, OR available)
Output: bytes
"""
by = 0
m = max(len(b1), len(b2))
if len(b1) != len(b2):
b1 = zfill_b(b1, m)
b2 = zfill_b(b2, m)
b1 = bytes_to_int(b1)
b2 = bytes_to_int(b2)
if ope == "XOR":
by = b1 ^ b2
elif ope == "AND":
by = b1 & b2
elif ope == "OR":
by = b1 | b2
else:
raise ValueError("Operation unvailable")
return int.to_bytes(by, m, "big")
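# Hedged examples (not part of the original module) of zero-filling and of a
# bitwise operation with implicit length alignment.
if __name__ == "__main__":
    assert zfill_b(b"\x0f", 2) == b"\x00\x0f"
    assert b_op(b"\xff\x0f", b"\x0f", "XOR") == b"\xff\x00"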
def splitBytes(data, n=8):
"""Split BytesArray into chunks of n (=8 by default) bytes."""
return [data[i: i + n] for i in range(0, len(data), n)]
def packSplittedBytes(pSplitted):
"""
Unsplit splitted array of bytes.
Output: byterarray
"""
packed = bytearray()
for elt in pSplitted:
packed += elt
return packed
def circularRotation(arr, direction=0, n=1):
"""
Circular shift to direction (left=0, right=1) of n (=1 by default) bits
Output: bytes
"""
nB = len(arr) * 8
arrInt = int.from_bytes(arr, "big")
# Generate full bytes of 1 of the size of the array
size = int("0x" + "".join(["FF" for _ in range(0, len(arr))]), 16)
# ((arrInt << n) shift to left, create 0 to the right
# (arrInt >> (nB - n))) get all bytes from left who needs to go right to the right, remainder is 0
# AND the two bytes
# & size remove from left the oversized bits
r = 0
if direction == 0:
r = ((arrInt << n) | (arrInt >> (nB - n))) & size
else:
r = ((arrInt >> n) | (arrInt << (nB - n))) & size
return r.to_bytes(len(arr), "big")
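# Hedged example (not part of the original module): rotating 0b10000001 left by
# one bit gives 0b00000011; rotating it right by one bit gives 0b11000000.
if __name__ == "__main__":
    assert circularRotation(b"\x81", 0, 1) == b"\x03"
    assert circularRotation(b"\x81", 1, 1) == b"\xc0"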
def hammingWeight(n: object):
"""
The number of symbols that are different from the zero-symbol in binary.
"""
n = bytes_to_int(mult_to_bytes(n))
weight = 0
for i in range(n.bit_length()):
if (n >> i) & 1:
weight += 1
return weight
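# Hedged example (not part of the original module): 15 (0b1111) has four set
# bits and 255 (0b11111111) has eight.
if __name__ == "__main__":
    assert hammingWeight(15) == 4
    assert hammingWeight(255) == 8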
|
the-stack_0_26978
|
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from cibyl.exceptions import CibylException
CONFIG_DOCS_URL = "https://cibyl.readthedocs.io/en/latest/configuration.html"
class InvalidConfiguration(CibylException):
"""Invalid configuration exception"""
def __init__(self, message="""
Invalid Configuration.
A valid configuration should specify an environment, its system(s) and the
system(s) details
environments:
env_1:
jenkins_system:
system_type: jenkins"""):
self.message = message
super().__init__(self.message)
class ConfigurationNotFound(CibylException):
"""Configuration file not found exception"""
def __init__(self, paths):
if paths:
paths = f" at: {paths}"
else:
paths = ""
self.message = f"""Could not find configuration file{paths}.\n
Check the documentation at {CONFIG_DOCS_URL} for more information"""
super().__init__(self.message)
class EmptyConfiguration(CibylException):
"""Configuration file is empty exception."""
def __init__(self, file):
self.message = f"""Configuration file {file} is empty.\n
Check the documentation at {CONFIG_DOCS_URL} for more \
details about the configuration syntax."""
super().__init__(self.message)
class InvalidSourceConfiguration(CibylException):
"""Invalid source configuration exception."""
def __init__(self, source_name, source_data):
self.message = f"""Invalid source configuration.
{source_name}: {source_data}
Check the documentation at {CONFIG_DOCS_URL} for more information"""
super().__init__(self.message)
|
the-stack_0_26980
|
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import pytest
from neon import NervanaObject
from neon.backends import gen_backend
from neon.layers.layer import Convolution
import ngraph as ng
from ngraph.frontends.neon import ax, ar
from ngraph.frontends.neon.layer import output_dim
from ngraph.testing import ExecutorFactory, RandomTensorGenerator, executor
rng = RandomTensorGenerator(0, np.float32)
NervanaObject.be = gen_backend()
class DummyDeltaBuffers(object):
"""
Dummy class for delta buffers needed by neon
"""
def __init__(self):
self.buffers = [None]
def test_wrong_filters_shape_length():
"""
test wrong filters shape length
"""
padding = dict(pad_d=0, pad_h=0, pad_w=0)
strides = dict(str_d=1, str_h=1, str_w=1)
dilation = dict(dil_d=1, dil_h=1, dil_w=1)
conv_params = padding.copy()
conv_params.update(strides)
conv_params.update(dilation)
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S])
inputs = ng.placeholder(ax_i)
filters = ng.placeholder(ax_f)
with pytest.raises(ValueError) as exinfo:
ng.convolution(conv_params, inputs, filters, {})
assert str(exinfo.value) == 'convolution filter shape must be length 5, found {}'\
.format(len(ax_f))
def test_first_axes_not_same():
"""
test first axes are not the same
"""
padding = dict(pad_d=0, pad_h=0, pad_w=0)
strides = dict(str_d=1, str_h=1, str_w=1)
dilation = dict(dil_d=1, dil_h=1, dil_w=1)
conv_params = padding.copy()
conv_params.update(strides)
conv_params.update(dilation)
ax_i = ng.make_axes([ax.D, ax.C, ax.H, ax.W, ax.N])
ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
inputs = ng.placeholder(ax_i)
filters = ng.placeholder(ax_f)
with pytest.raises(ValueError) as exinfo:
ng.convolution(conv_params, inputs, filters, {})
assert str(exinfo.value) == 'the first axis in input {inputs} and filter {filters} ' \
'are not the same.'.format(
inputs=inputs.axes[0],
filters=filters.axes[0])
def test_wrong_number_of_batch_axes_at_input():
"""
test wrong number of batch axes at input
"""
padding = dict(pad_d=0, pad_h=0, pad_w=0)
strides = dict(str_d=1, str_h=1, str_w=1)
dilation = dict(dil_d=1, dil_h=1, dil_w=1)
conv_params = padding.copy()
conv_params.update(strides)
conv_params.update(dilation)
C = 3
D = 1
ax_C = ng.make_axis(name='N', length=C)
ax_D = ng.make_axis(name='N', length=D)
ax_i = ng.make_axes([ax_C, ax_D, ax.H, ax.W, ax.N])
ax_f = ng.make_axes([ax_C, ax.T, ax.R, ax.S, ax.K])
inputs = ng.placeholder(axes=ax_i)
filters = ng.placeholder(ax_f)
with pytest.raises(ValueError) as exinfo:
ng.convolution(conv_params, inputs, filters, {})
assert str(exinfo.value) == "Input must have one batch axis. Found {n_batch_axes} " \
"batch axes: {batch_axes} Found {n_sample_axes} sample axes: {sample_axes}.".format(
n_batch_axes=len(inputs.axes.batch_axes()),
batch_axes=inputs.axes.batch_axes(),
n_sample_axes=len(inputs.axes.sample_axes()),
sample_axes=inputs.axes.sample_axes())
def test_convolution_backprop(transformer_factory):
"""
test convolution backprop path
"""
N = 128
C, K = 3, 2
D, T = 1, 1
H = W = 32
R = S = 2
padding = dict(pad_d=0, pad_h=0, pad_w=0)
strides = dict(str_d=1, str_h=1, str_w=1)
dilation = dict(dil_d=1, dil_h=1, dil_w=1)
conv_params = padding.copy()
conv_params.update(strides)
conv_params.update(dilation)
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
ax_i.set_shape((C, D, H, W, N))
ax_f.set_shape((C, T, R, S, K))
ax_o = ng.make_axes([
ng.make_axis(roles=[ar.features_input]).named('C'),
ng.make_axis(roles=[ar.features_0]).named('D'),
ng.make_axis(roles=[ar.features_1]).named('H'),
ng.make_axis(roles=[ar.features_2]).named('W'),
ax.N
])
ax_o[:-1].set_shape((
K,
output_dim(D, T, padding['pad_d'], strides['str_d']),
output_dim(H, R, padding['pad_h'], strides['str_h']),
output_dim(W, S, padding['pad_w'], strides['str_w']))
)
inputs = ng.placeholder(axes=ax_i)
filters = ng.placeholder(axes=ax_f)
# randomly initialize
input_value = rng.uniform(-1, 1, ax_i)
filter_value = rng.uniform(-1, 1, ax_f)
assert input_value.shape == ax_i.lengths
assert filter_value.shape == ax_f.lengths
output = ng.sum(ng.convolution(conv_params, inputs, filters, ax_o), out_axes=())
with ExecutorFactory() as factory:
dcdf_sym_fun = factory.derivative(output, filters, inputs)
dcdf_num_fun = factory.numeric_derivative(output, filters, .01, inputs)
dcdf_sym_val = dcdf_sym_fun(filter_value, input_value)
dcdf_num_val = dcdf_num_fun(filter_value, input_value)
ng.testing.assert_allclose(dcdf_sym_val, dcdf_num_val, rtol=1)
def test_convolution(transformer_factory):
"""
test convolution forward path
"""
N = 128
C, K = 3, 8
D, T = 1, 1
H = W = 32
R = S = 2
padding = dict(pad_d=0, pad_h=0, pad_w=0)
strides = dict(str_d=1, str_h=1, str_w=1)
dilation = dict(dil_d=1, dil_h=1, dil_w=1)
conv_params = padding.copy()
conv_params.update(strides)
conv_params.update(dilation)
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
ax_i.set_shape((C, D, H, W, N))
ax_f.set_shape((C, T, R, S, K))
ax_o = ng.make_axes([
ng.make_axis(roles=[ar.features_input]).named('C'),
ng.make_axis(roles=[ar.features_0]).named('D'),
ng.make_axis(roles=[ar.features_1]).named('H'),
ng.make_axis(roles=[ar.features_2]).named('W'),
ax.N
])
ax_o[:-1].set_shape((
K,
output_dim(D, T, padding['pad_d'], strides['str_d']),
output_dim(H, R, padding['pad_h'], strides['str_h']),
output_dim(W, S, padding['pad_w'], strides['str_w']))
)
inputs = ng.placeholder(axes=ax_i)
filters = ng.placeholder(axes=ax_f)
# randomly initialize
input_value = rng.uniform(-1, 1, ax_i)
filter_value = rng.uniform(-1, 1, ax_f)
assert input_value.shape == ax_i.lengths
assert filter_value.shape == ax_f.lengths
inputs = ng.placeholder(ax_i)
filters = ng.placeholder(ax_f)
output = ng.convolution(conv_params, inputs, filters, axes=ax_o)
targets = ng.placeholder(axes=output.axes)
costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)
error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)
d_inputs = ng.deriv(error, inputs)
d_filters = ng.deriv(error, filters)
targets_value = rng.uniform(.1, 0.9, output.axes)
with executor([output, error, d_inputs, d_filters], inputs, filters, targets) as conv_executor:
result_ng, err_ng, gradI_ng, gradF_ng = \
conv_executor(input_value, filter_value, targets_value)
# Now compute reference values via NEON
NervanaObject.be.bsz = N
neon_layer = Convolution(fshape=(R, S, K), padding=padding, strides=strides)
inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))
neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))
neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)
neon_layer.configure((C, H, W))
neon_layer.prev_layer = True
neon_layer.allocate()
neon_layer.set_deltas(DummyDeltaBuffers())
result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)
act_result_ne = 1. / (1.0 + np.exp(-result_ne))
err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))
gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)
gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)
# Compare fprop
ng.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)
# Compare bprop
ng.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)
# Compare update
ng.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)
def test_conv_flatten_deriv(transformer_factory):
"""
Test deriv of conv followed by flatten
"""
# set shape
# NOTE: N must be >= 4 for GPU, but for CPU this could be decreased to
# speed up the test
N = 4
C, D, H, W = (3, 1, 28, 28)
T, R, S, K = (1, 5, 5, 8)
params = dict(pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1, dil_d=1, dil_h=1, dil_w=1)
# i, f, o axes
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
ax_o = ng.make_axes([
ng.make_axis(roles=[ar.features_input]).named('C'),
ng.make_axis(roles=[ar.features_0]).named('D'),
ng.make_axis(roles=[ar.features_1]).named('H'),
ng.make_axis(roles=[ar.features_2]).named('W'),
ax.N
])
ax_i.set_shape((C, D, H, W, N))
ax_f.set_shape((C, T, R, S, K))
ax_o.set_shape((K, D - T + 1, H - R + 1, W - S + 1, N))
axes_rsck = ng.make_axes([ax.R, ax.S, ax.C, ax.K])
axes_rsck_prime = ng.make_axes([ng.make_axis(axis.length).named(axis.name + 'p')
for axis in axes_rsck])
axes_nmpqk = ng.make_axes([ax_o[-1], ax_o[1], ax_o[2], ax_o[3], ax_o[0]])
# broadcast input / filter axes
input_var = ng.variable(ax_i).named('input')
input_var.input = True
input_val = np.ones(input_var.axes.lengths)
filter_rsck_prime = ng.variable(axes_rsck_prime)
filter_var = filter_rsck_prime
filter_rsck = ng.cast_axes(filter_rsck_prime, axes_rsck)
filter_trsck = ng.expand_dims(filter_rsck, ax.T, 0)
filter_ctrsk = ng.axes_with_order(filter_trsck, axes=ax_f)
# convolution
output_kmpqn = ng.convolution(params, input_var, filter_ctrsk, axes=ax_o)
output_nmpqk = ng.axes_with_order(output_kmpqn, axes=axes_nmpqk)
# slice away the oD
out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]
output_npqk = ng.tensor_slice(output_nmpqk, out_slicing)
output = ng.flatten_at(output_npqk, idx=1)
# cost and grad
cost = ng.sum(output, out_axes=())
filter_var.input = True
filter_var.named('filter')
filter_val = np.ones(filter_var.axes.lengths)
with ExecutorFactory() as factory:
conv_comp = factory.executor(output, filter_var, input_var)
grad_filter_num_comp = factory.numeric_derivative(cost, filter_var, 1.0, input_var)
grad_filter_sym_comp = factory.derivative(cost, filter_var, input_var)
grad_input_num_comp = factory.numeric_derivative(cost, input_var, 1.0, filter_var)
grad_input_sym_comp = factory.derivative(cost, input_var, filter_var)
conv_val = conv_comp(filter_val, input_val)
conv_val_num = np.empty_like(conv_val)
conv_val_num.fill(C * T * R * S)
assert ng.testing.allclose(conv_val, conv_val_num)
grad_filter_num_val = grad_filter_num_comp(filter_val, input_val)
grad_filter_sym_val = grad_filter_sym_comp(filter_val, input_val)
assert ng.testing.allclose(grad_filter_num_val, grad_filter_sym_val)
grad_input_num_val = grad_input_num_comp(input_val, filter_val)
grad_input_sym_val = grad_input_sym_comp(input_val, filter_val)
assert ng.testing.allclose(grad_input_num_val, grad_input_sym_val)
|
the-stack_0_26981
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import glob
import os.path
import pathlib
import pickle
import time
import typing as t
from dataclasses import dataclass
from datetime import datetime, timedelta
from threatexchange.signal_type.index import SignalTypeIndex
from threatexchange.signal_type.pdq_index import PDQIndex
from hmalib.common.logging import get_logger
from hmalib.common.models.pipeline import HashRecord
from hmalib.common.timebucketizer import TimeBucketizer
starttime = time.time()
logger = get_logger(__name__)
class LCCIndexer:
@classmethod
def get_recent_index(cls, storage_path, signal_type) -> PDQIndex:
"""Get the most recent index."""
directory = os.path.join(storage_path, signal_type)
latest_directory = max(pathlib.Path(directory).glob("*/"), key=os.path.getmtime)
with open(latest_directory, "rb") as f:
return pickle.load(f)
@classmethod
    def build_index_from_last_24h(cls, signal_type, storage_path, bucket_width) -> PDQIndex:
        """Build an index from the last 24 hours of hash records."""
d = timedelta(days=1)
past_day_content = TimeBucketizer.get_records(
(datetime.now() - d),
datetime.now(),
signal_type,
storage_path,
bucket_width,
HashRecord,
)
record_list = []
for record in past_day_content:
record_list.append((record.content_hash, record.content_id))
testIndex = PDQIndex.build(record_list)
return testIndex
@classmethod
def override_recent_index(
cls,
index: SignalTypeIndex,
signal_type,
storage_path,
bucket_width,
) -> None:
"""
        Write the given index as the most recent index for this signal type.
"""
creation_time = str(datetime.now().strftime("%Y-%m-%d_%H:%M"))
directory = os.path.join(storage_path, signal_type, creation_time)
with open(directory, "wb") as f:
pickle.dump(index, f)
|
the-stack_0_26983
|
from ...builder import DETECTORS
from .single_stage import SingleStageDetectorRbbox
@DETECTORS.register_module()
class RetinaNetOBB(SingleStageDetectorRbbox):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(RetinaNetOBB, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
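# A hypothetical config sketch (component names below are placeholders, not taken
# from this repository) showing how a registered detector such as RetinaNetOBB is
# typically instantiated from a config dict via the DETECTORS registry:
#
#   model = dict(
#       type='RetinaNetOBB',
#       backbone=dict(type='ResNet', depth=50),
#       neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256),
#       bbox_head=dict(...),  # an oriented-box retina head defined elsewhere in the repo
#   )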
|
the-stack_0_26987
|
"""Views pertaining to builds."""
import json
import logging
import re
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from readthedocs.builds.constants import LATEST
from readthedocs.core.utils import trigger_build
from readthedocs.projects import constants
from readthedocs.projects.models import Feature, Project
from readthedocs.projects.tasks import sync_repository_task
log = logging.getLogger(__name__)
class NoProjectException(Exception):
pass
def _allow_deprecated_webhook(project):
return project.has_feature(Feature.ALLOW_DEPRECATED_WEBHOOKS)
def _build_version(project, slug, already_built=()):
"""
Where we actually trigger builds for a project and slug.
All webhook logic should route here to call ``trigger_build``.
"""
if not project.has_valid_webhook:
project.has_valid_webhook = True
project.save()
# Previously we were building the latest version (inactive or active)
# when building the default version,
# some users may have relied on this to update the version list #4450
version = project.versions.filter(active=True, slug=slug).first()
if version and slug not in already_built:
log.info(
'(Version build) Building %s:%s',
project.slug,
version.slug,
)
trigger_build(project=project, version=version, force=True)
return slug
log.info('(Version build) Not Building %s', slug)
return None
def build_branches(project, branch_list):
"""
Build the branches for a specific project.
Returns:
to_build - a list of branches that were built
not_building - a list of branches that we won't build
"""
to_build = set()
not_building = set()
for branch in branch_list:
versions = project.versions_from_branch_name(branch)
for version in versions:
log.info(
'(Branch Build) Processing %s:%s',
project.slug,
version.slug,
)
ret = _build_version(project, version.slug, already_built=to_build)
if ret:
to_build.add(ret)
else:
not_building.add(version.slug)
return (to_build, not_building)
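# Illustrative return value (slugs are made up): if the pushed branches map to
# versions 'latest' (active) and 'stable' (inactive), build_branches() returns
# ({'latest'}, {'stable'}) -- the first set was triggered, the second was skipped.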
def sync_versions(project):
"""
Sync the versions of a repo using its latest version.
This doesn't register a new build,
but clones the repo and syncs the versions.
Due that `sync_repository_task` is bound to a version,
we always pass the default version.
:returns: The version slug that was used to trigger the clone.
:rtype: str
"""
try:
version_identifier = project.get_default_branch()
version = (
project.versions.filter(
identifier=version_identifier,
).first()
)
if not version:
log.info('Unable to sync from %s version', version_identifier)
return None
sync_repository_task.delay(version.pk)
return version.slug
except Exception:
log.exception('Unknown sync versions exception')
return None
def get_project_from_url(url):
if not url:
return Project.objects.none()
projects = (
Project.objects.filter(repo__iendswith=url) |
Project.objects.filter(repo__iendswith=url + '.git')
)
return projects
def log_info(project, msg):
log.info(
constants.LOG_TEMPLATE,
{
'project': project,
'version': '',
'msg': msg,
}
)
def _build_url(url, projects, branches):
"""
Map a URL onto specific projects to build that are linked to that URL.
Check each of the ``branches`` to see if they are active and should be
built.
"""
ret = ''
all_built = {}
all_not_building = {}
# This endpoint doesn't require authorization, we shouldn't allow builds to
# be triggered from this any longer. Deprecation plan is to selectively
# allow access to this endpoint for now.
if not any(_allow_deprecated_webhook(project) for project in projects):
return HttpResponse('This API endpoint is deprecated', status=403)
for project in projects:
(built, not_building) = build_branches(project, branches)
if not built:
# Call sync_repository_task to update tag/branch info
version = project.versions.get(slug=LATEST)
sync_repository_task.delay(version.pk)
msg = '(URL Build) Syncing versions for %s' % project.slug
log.info(msg)
all_built[project.slug] = built
all_not_building[project.slug] = not_building
for project_slug, built in list(all_built.items()):
if built:
msg = '(URL Build) Build Started: {} [{}]'.format(
url,
' '.join(built),
)
log_info(project_slug, msg=msg)
ret += msg
for project_slug, not_building in list(all_not_building.items()):
if not_building:
msg = '(URL Build) Not Building: {} [{}]'.format(
url,
' '.join(not_building),
)
log_info(project_slug, msg=msg)
ret += msg
if not ret:
ret = '(URL Build) No known branches were pushed to.'
return HttpResponse(ret)
@csrf_exempt
def github_build(request): # noqa: D205
"""
GitHub webhook consumer.
.. warning:: **DEPRECATED**
Use :py:class:`readthedocs.api.v2.views.integrations.GitHubWebhookView`
instead of this view function
This will search for projects matching either a stripped down HTTP or SSH
URL. The search is error prone, use the API v2 webhook for new webhooks.
Old webhooks may not have specified the content type to POST with, and
therefore can use ``application/x-www-form-urlencoded`` to pass the JSON
payload. More information on the API docs here:
https://developer.github.com/webhooks/creating/#content-type
"""
if request.method == 'POST':
try:
if request.META['CONTENT_TYPE'] == 'application/x-www-form-urlencoded':
data = json.loads(request.POST.get('payload'))
else:
data = json.loads(request.body)
http_url = data['repository']['url']
http_search_url = http_url.replace('http://', '').replace('https://', '')
ssh_url = data['repository']['ssh_url']
ssh_search_url = ssh_url.replace('git@', '').replace('.git', '')
branches = [data['ref'].replace('refs/heads/', '')]
except (ValueError, TypeError, KeyError):
log.exception('Invalid GitHub webhook payload')
return HttpResponse('Invalid request', status=400)
try:
repo_projects = get_project_from_url(http_search_url)
if repo_projects:
log.info(
'GitHub webhook search: url=%s branches=%s',
http_search_url,
branches,
)
ssh_projects = get_project_from_url(ssh_search_url)
if ssh_projects:
log.info(
'GitHub webhook search: url=%s branches=%s',
ssh_search_url,
branches,
)
projects = repo_projects | ssh_projects
return _build_url(http_search_url, projects, branches)
except NoProjectException:
log.exception('Project match not found: url=%s', http_search_url)
return HttpResponseNotFound('Project not found')
else:
return HttpResponse('Method not allowed, POST is required', status=405)
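# For reference, a trimmed-down example of the GitHub push payload fields that
# github_build() actually reads (values here are made up; everything else in the
# real payload is ignored):
#
#   {
#       "ref": "refs/heads/main",
#       "repository": {
#           "url": "https://github.com/org/repo",
#           "ssh_url": "git@github.com:org/repo.git"
#       }
#   }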
@csrf_exempt
def gitlab_build(request): # noqa: D205
"""
GitLab webhook consumer.
.. warning:: **DEPRECATED**
Use :py:class:`readthedocs.api.v2.views.integrations.GitLabWebhookView`
instead of this view function
Search project repository URLs using the site URL from GitLab webhook payload.
This search is error-prone, use the API v2 webhook view for new webhooks.
"""
if request.method == 'POST':
try:
data = json.loads(request.body)
url = data['project']['http_url']
search_url = re.sub(r'^https?://(.*?)(?:\.git|)$', '\\1', url)
branches = [data['ref'].replace('refs/heads/', '')]
except (ValueError, TypeError, KeyError):
log.exception('Invalid GitLab webhook payload')
return HttpResponse('Invalid request', status=400)
log.info(
'GitLab webhook search: url=%s branches=%s',
search_url,
branches,
)
projects = get_project_from_url(search_url)
if projects:
return _build_url(search_url, projects, branches)
log.info('Project match not found: url=%s', search_url)
return HttpResponseNotFound('Project match not found')
return HttpResponse('Method not allowed, POST is required', status=405)
@csrf_exempt
def bitbucket_build(request):
"""
Consume webhooks from multiple versions of Bitbucket's API.
.. warning:: **DEPRECATED**
Use :py:class:`readthedocs.api.v2.views.integrations.BitbucketWebhookView`
instead of this view function
New webhooks are set up with v2, but v1 webhooks will still point to this
endpoint. There are also "services" that point here and submit
``application/x-www-form-urlencoded`` data.
API v1
https://confluence.atlassian.com/bitbucket/events-resources-296095220.html
API v2
https://confluence.atlassian.com/bitbucket/event-payloads-740262817.html#EventPayloads-Push
Services
https://confluence.atlassian.com/bitbucket/post-service-management-223216518.html
"""
if request.method == 'POST':
try:
if request.META['CONTENT_TYPE'] == 'application/x-www-form-urlencoded':
data = json.loads(request.POST.get('payload'))
else:
data = json.loads(request.body)
version = 2 if request.META.get('HTTP_USER_AGENT') == 'Bitbucket-Webhooks/2.0' else 1 # yapf: disabled # noqa
if version == 1:
branches = [
commit.get('branch', '') for commit in data['commits']
]
repository = data['repository']
if not repository['absolute_url']:
return HttpResponse('Invalid request', status=400)
search_url = 'bitbucket.org{}'.format(
repository['absolute_url'].rstrip('/'),
)
elif version == 2:
changes = data['push']['changes']
branches = [change['new']['name'] for change in changes]
if not data['repository']['full_name']:
return HttpResponse('Invalid request', status=400)
search_url = 'bitbucket.org/{}'.format(
data['repository']['full_name'],
)
except (TypeError, ValueError, KeyError):
log.exception('Invalid Bitbucket webhook payload')
return HttpResponse('Invalid request', status=400)
log.info(
'Bitbucket webhook search: url=%s branches=%s',
search_url,
branches,
)
log.debug('Bitbucket webhook payload:\n\n%s\n\n', data)
projects = get_project_from_url(search_url)
if projects and branches:
return _build_url(search_url, projects, branches)
if not branches:
log.info(
'Commit/branch not found url=%s branches=%s',
search_url,
branches,
)
return HttpResponseNotFound('Commit/branch not found')
log.info('Project match not found: url=%s', search_url)
return HttpResponseNotFound('Project match not found')
return HttpResponse('Method not allowed, POST is required', status=405)
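# For reference, the payload fields bitbucket_build() reads for each API version
# (derived from the parsing above; values in real payloads differ):
#   v1: data['commits'][i]['branch'] and data['repository']['absolute_url']
#   v2: data['push']['changes'][i]['new']['name'] and data['repository']['full_name']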
@csrf_exempt
def generic_build(request, project_id_or_slug=None):
"""
Generic webhook build endpoint.
.. warning:: **DEPRECATED**
Use :py:class:`readthedocs.api.v2.views.integrations.GenericWebhookView`
instead of this view function
"""
try:
project = Project.objects.get(pk=project_id_or_slug)
# Allow slugs too
except (Project.DoesNotExist, ValueError):
try:
project = Project.objects.get(slug=project_id_or_slug)
except (Project.DoesNotExist, ValueError):
log.exception(
'(Incoming Generic Build) Repo not found: %s',
project_id_or_slug,
)
return HttpResponseNotFound(
'Repo not found: %s' % project_id_or_slug,
)
# This endpoint doesn't require authorization, we shouldn't allow builds to
# be triggered from this any longer. Deprecation plan is to selectively
# allow access to this endpoint for now.
if not _allow_deprecated_webhook(project):
return HttpResponse('This API endpoint is deprecated', status=403)
if request.method == 'POST':
slug = request.POST.get('version_slug', project.default_version)
log.info(
'(Incoming Generic Build) %s [%s]',
project.slug,
slug,
)
_build_version(project, slug)
else:
return HttpResponse('You must POST to this resource.')
return redirect('builds_project_list', project.slug)
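# A hedged sketch (not part of this module; the URL pattern is an assumption that
# depends on this project's urlconf) of exercising the deprecated generic endpoint
# from a Django test:
#
#   from django.test import Client
#   Client().post('/build/<project_slug>/', data={'version_slug': 'latest'})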
|
the-stack_0_26988
|
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
df = pd.read_csv(path)
# Code starts here
## Lets split the dependent and independent variables
X = df.drop(['customerID', 'Churn'], axis=1)
y = df['Churn'].copy()
# Test train split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
#Replacing spaces with 'NaN' in train dataset
X_train['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Replacing spaces with 'NaN' in test dataset
X_test['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Converting the type of column from X_train to float
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
#Converting the type of column from X_test to float
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
#Filling missing values
X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True)
X_test['TotalCharges'].fillna(X_train['TotalCharges'].mean(), inplace=True)
#Check value counts
print(X_train.isnull().sum())
cat_cols = X_train.select_dtypes(include='O').columns.tolist()
#Label encoding train and test data with the same encoder per column,
#so the integer codes stay consistent across both splits
for x in cat_cols:
    le = LabelEncoder()
    X_train[x] = le.fit_transform(X_train[x])
    X_test[x] = le.transform(X_test[x])
#Encoding train data target
y_train = y_train.replace({'No':0, 'Yes':1})
#Encoding test data target
y_test = y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
X_train.head()
X_test.head()
y_train.head()
y_test.head()
ada_model = AdaBoostClassifier(random_state=0)
ada_model.fit(X_train,y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test,y_pred)
ada_cm = confusion_matrix(y_test,y_pred)
ada_cr = classification_report(y_test, y_pred)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
# Code starts here
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train,y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test,y_pred)
xgb_cm = confusion_matrix(y_test,y_pred)
xgb_cr = classification_report(y_test, y_pred)
clf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters)
clf_model.fit(X_train,y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test,y_pred)
clf_cm = confusion_matrix(y_test,y_pred)
clf_cr = classification_report(y_test, y_pred)
print(xgb_score)
print(xgb_cm)
print(xgb_cr)
print('='*20)
print(clf_score)
print(clf_cm)
print(clf_cr)
|
the-stack_0_26989
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://ru.wikipedia.org/wiki/BMP
# SOURCE: https://en.wikipedia.org/wiki/BMP_file_format
import glob
import struct
# https://en.wikipedia.org/wiki/BMP_file_format#DIB_header_(bitmap_information_header)
SIZE_BY_HEADER_TYPE = {
12: 'BITMAPCOREHEADER / OS21XBITMAPHEADER',
64: 'OS22XBITMAPHEADER',
16: 'OS22XBITMAPHEADER',
40: 'BITMAPINFOHEADER',
52: 'BITMAPV2INFOHEADER',
56: 'BITMAPV3INFOHEADER',
108: 'BITMAPV4HEADER',
124: 'BITMAPV5HEADER',
}
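# The 14-byte BITMAPFILEHEADER parsed by print_info() below is laid out as:
#   bfType      2 bytes  -- magic bytes, normally b'BM'
#   bfSize      4 bytes  -- total file size in bytes
#   bfReserved1 2 bytes
#   bfReserved2 2 bytes
#   bfOffBits   4 bytes  -- offset from the start of the file to the pixel data
# which is why the 12 bytes after bfType are unpacked with the '<IHHI' format.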
def print_info(file_name: str):
with open(file_name, 'rb') as f:
# Bitmap file header
# BITMAPFILEHEADER
bfType = f.read(2)
print('bfType:', bfType)
data = f.read(12)
bfSize, bfReserved1, bfReserved2, bfOffBits \
= struct.unpack('<IHHI', data)
print('bfSize:', bfSize)
print('bfReserved1:', bfReserved1)
print('bfReserved2:', bfReserved2)
print('bfOffBits:', bfOffBits)
# DIB header
data = f.read(4)
size = struct.unpack('<I', data)[0]
print('size:', size)
print('Header:', SIZE_BY_HEADER_TYPE.get(size, '<Unknown>'))
if __name__ == '__main__':
for file_name in glob.glob('*.bmp'):
print(file_name)
print_info(file_name)
print()
|
the-stack_0_26990
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import pickle as pkl
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import torch.nn.functional as F
from torch.autograd import Variable
from model import AttentionLSTMClassifier
from torch.utils.data import Dataset, DataLoader
from early_stop import EarlyStop
from measurement import CalculateFM
import numpy as np
import matplotlib.pyplot as plt
class DataSet(Dataset):
def __init__(self, __fold_path, __pad_len, __word2id, __num_labels, max_size=None):
self.pad_len = __pad_len
self.word2id = __word2id
self.pad_int = __word2id['<pad>']
        self.data = []
        self.label = []
        self.num_label = __num_labels
        self.seq_len = []
        self.read_data(__fold_path)
        # Optionally keep only the first max_size examples
        if max_size is not None:
            self.data = self.data[:max_size]
            self.label = self.label[:max_size]
            self.seq_len = self.seq_len[:max_size]
        assert len(self.seq_len) == len(self.data) == len(self.label)
def read_data(self, __fold_path):
with open(__fold_path, 'r') as f:
for line in f.readlines():
tokens = line.split('\t')
tmp = [self.word2id[x] for x in tokens[1].split() if x in self.word2id]
if len(tmp) == 0:
tmp = [self.word2id['<unk>']]
self.seq_len.append(len(tmp) if len(tmp) < self.pad_len else self.pad_len)
if len(tmp) > self.pad_len:
tmp = tmp[: self.pad_len]
self.data.append(tmp + [self.pad_int] * (self.pad_len - len(tmp)))
tmp2 = tokens[2:]
a_label = [0] * self.num_label
for item in tmp2:
a_label[int(item)] = 1
self.label.append(a_label)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return torch.LongTensor(self.data[idx]), torch.LongTensor([self.seq_len[idx]]), torch.FloatTensor(self.label[idx])
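# Input format expected by DataSet.read_data (inferred from the parsing above):
# each line is tab-separated as "<id>\t<tokenized text>\t<label id>[\t<label id>...]",
# i.e. tokens[1] holds whitespace-separated tokens and tokens[2:] hold the integer
# label ids that get turned into a multi-hot target vector of size num_labels.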
def build_vocab(fold_path, use_unk=True):
word_count = {}
word2id = {}
id2word = {}
with open(os.path.join(fold_path, 'vocubulary.txt')) as f:
# add <pad> first
word2id['<pad>'] = 0
id2word[0] = '<pad>'
if use_unk:
word2id['<unk>'] = 1
id2word[1] = '<unk>'
n = len(word2id)
for word in f.readlines():
w = word.strip()
word2id[w] = n
id2word[n] = w
n += 1
return word2id, id2word
def sort_batch(batch, ys, lengths):
seq_lengths, perm_idx = lengths.sort(0, descending=True)
seq_tensor = batch[perm_idx]
targ_tensor = ys[perm_idx]
return seq_tensor, targ_tensor, seq_lengths
def one_fold(fold_int, is_nine_folds):
fold_id = str(fold_int)
if is_nine_folds:
fold_path = 'data/Folds_9_Emotions/fold_' + fold_id
num_labels = 9
else:
fold_path = 'data/Folds/fold_' + fold_id
num_labels = 16
pad_len = 30
batch_size = 64
hidden_dim = 600
word2id, id2word = build_vocab(fold_path, use_unk=True)
vocab_size = len(word2id)
embedding_dim = len(word2id)
es = EarlyStop(2)
train_data = DataSet(os.path.join(fold_path, 'train.csv'), pad_len, word2id, num_labels)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_data = DataSet(os.path.join(fold_path, 'test.csv'), pad_len, word2id, num_labels)
test_loader = DataLoader(test_data, batch_size=batch_size)
model = AttentionLSTMClassifier(embedding_dim, hidden_dim, vocab_size, word2id,
num_labels, batch_size)
model.load_bog_embedding(word2id)
model.cuda()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
loss_criterion = nn.BCELoss()
for epoch in range(4):
print('Epoch:', epoch, '===================================')
train_loss = 0
for i, (data, seq_len, label) in enumerate(train_loader):
data, label, seq_len = sort_batch(data, label, seq_len.view(-1))
y_pred = model(Variable(data).cuda(), seq_len)
optimizer.zero_grad()
loss = loss_criterion(y_pred, Variable(label).cuda())
loss.backward()
optimizer.step()
train_loss += loss.data[0]
pred_list = []
gold_list = []
test_loss = 0
for i, (data, seq_len, label) in enumerate(test_loader):
data, label, seq_len = sort_batch(data, label, seq_len.view(-1))
y_pred = model(Variable(data, volatile=True).cuda(), seq_len)
loss = loss_criterion(y_pred, Variable(label, volatile=True).cuda())
test_loss += loss.data[0]
pred_list.append(y_pred.data.cpu().numpy())
gold_list.append(label.numpy())
print("Train Loss: ", train_loss, " Evaluation: ", test_loss)
es.new_loss(test_loss)
if es.if_stop():
print('Start over fitting')
break
f_ma = []
f_mi = []
for threshold in range(0, 100, 5):
threshold /= 100
tmp = CalculateFM(np.concatenate(pred_list, axis=0), np.concatenate(gold_list, axis=0), threshold=threshold)
f_ma.append(tmp['MacroFM'])
f_mi.append(tmp['MicroFM'])
return f_ma, f_mi
if __name__ == '__main__':
f_ma_list = []
f_mi_list = []
for i in range(5):
f_ma, f_mi = one_fold(i, is_nine_folds=True)
f_ma_list.append(f_ma)
f_mi_list.append(f_mi)
f_ma_np_9 = np.asarray(f_ma_list)#.mean(axis=0)
f_mi_np_9 = np.asarray(f_mi_list)#.mean(axis=0)
f_ma_list = []
f_mi_list = []
for i in range(5):
f_ma, f_mi = one_fold(i, is_nine_folds=False)
f_ma_list.append(f_ma)
f_mi_list.append(f_mi)
f_ma_np_16 = np.asarray(f_ma_list)#.mean(axis=0)
f_mi_np_16 = np.asarray(f_mi_list)#.mean(axis=0)
import scipy.io as sio
sio.savemat('bow5k.mat', {'bow_9_ma': f_ma_np_9,
'bow_9_mi': f_mi_np_9,
'bow_16_ma': f_ma_np_16,
'bow_16_mi': f_mi_np_16})
# print(f_ma_np_16[3])
# print(f_mi_np_16[3])
#
# t = np.arange(0., 1., 0.05)
#
# plt.plot(t, f_ma_np_16, 'b--', label='Macro FM')
# plt.plot(t, f_ma_np_16, 'r-.', label='Micro FM')
# plt.legend()
#
# plt.show()
|
the-stack_0_26992
|
import numpy as np
import tensorflow as tf
import locale
locale.setlocale(locale.LC_ALL, '')
_params = {}
_qparams = {}
_param_aliases = {}
def qparam(name, cls, args):
if name not in _qparams:
instance = cls(**args)
_qparams[name] = instance
result = _qparams[name]
return result
def param(name, *args, **kwargs):
"""
A wrapper for `tf.Variable` which enables parameter sharing in models.
    Creates and returns TensorFlow variables similarly to `tf.Variable`,
except if you try to create a param with the same name as a
previously-created one, `param(...)` will just return the old one instead of
making a new one.
    This constructor also adds a `param` attribute to the variables it
creates, so that you can easily search a graph for all params.
"""
if name not in _params:
kwargs['name'] = name
param = tf.Variable(*args, **kwargs)
param.param = True
_params[name] = param
result = _params[name]
i = 0
while result in _param_aliases:
# print 'following alias {}: {} to {}'.format(i, result, _param_aliases[result])
i += 1
result = _param_aliases[result]
return result
def params_with_name(name):
tf_params = [p for n,p in _params.items() if name in n]
keras_list = [p.trainable_weights for n,p in _qparams.items() if name in n]
keras_params = [item for sublist in keras_list for item in sublist]
print([n for n,p in _qparams.items() if name in n])
return tf_params + keras_params
def delete_all_params():
_params.clear()
def alias_params(replace_dict):
for old,new in replace_dict.items():
# print "aliasing {} to {}".format(old,new)
_param_aliases[old] = new
def delete_param_aliases():
_param_aliases.clear()
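# A minimal usage sketch (hypothetical names; this module ships no example of its own):
#
#   import numpy as np
#   w1 = param('Generator.W', np.zeros((128, 128), dtype='float32'))
#   w2 = param('Generator.W', np.zeros((128, 128), dtype='float32'))
#   assert w1 is w2                           # same name -> same shared tf.Variable
#   gen_vars = params_with_name('Generator')  # collect variables by name substring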
# def search(node, critereon):
# """
# Traverse the Theano graph starting at `node` and return a list of all nodes
# which match the `critereon` function. When optimizing a cost function, you
# can use this to get a list of all of the trainable params in the graph, like
# so:
# `lib.search(cost, lambda x: hasattr(x, "param"))`
# """
# def _search(node, critereon, visited):
# if node in visited:
# return []
# visited.add(node)
# results = []
# if isinstance(node, T.Apply):
# for inp in node.inputs:
# results += _search(inp, critereon, visited)
# else: # Variable node
# if critereon(node):
# results.append(node)
# if node.owner is not None:
# results += _search(node.owner, critereon, visited)
# return results
# return _search(node, critereon, set())
# def print_params_info(params):
# """Print information about the parameters in the given param set."""
# params = sorted(params, key=lambda p: p.name)
# values = [p.get_value(borrow=True) for p in params]
# shapes = [p.shape for p in values]
# print "Params for cost:"
# for param, value, shape in zip(params, values, shapes):
# print "\t{0} ({1})".format(
# param.name,
# ",".join([str(x) for x in shape])
# )
# total_param_count = 0
# for shape in shapes:
# param_count = 1
# for dim in shape:
# param_count *= dim
# total_param_count += param_count
# print "Total parameter count: {0}".format(
# locale.format("%d", total_param_count, grouping=True)
# )
def print_model_settings(locals_):
print("Uppercase local vars:")
all_vars = [(k,v) for (k,v) in locals_.items() if (k.isupper() and k!='T' and k!='SETTINGS' and k!='ALL_SETTINGS')]
all_vars = sorted(all_vars, key=lambda x: x[0])
for var_name, var_value in all_vars:
print("\t{}: {}".format(var_name, var_value))
def print_model_settings_dict(settings):
print("Settings dict:")
all_vars = [(k,v) for (k,v) in settings.items()]
all_vars = sorted(all_vars, key=lambda x: x[0])
for var_name, var_value in all_vars:
print("\t{}: {}".format(var_name, var_value))
|
the-stack_0_26994
|
from tkinter import *
from factory import Factory
Config = Factory.get('Config')
from ark.thread_handler import ThreadHandler
from ark.gui.tasks import GuiTasks
from ark.gui.control import Control
import time
class PyArcGui(Frame):
gui_title = "pyarc - Rcon for Ark Survival"
gui_size = "1200x700"
def __init__(self, master):
Frame.__init__(self,master)
self.pack(fill=BOTH, expand=1)
self.create_widgets()
ThreadHandler.create_thread(GuiTasks.loop)
def create_widgets(self):
self.feedback = Text(self,width=100,height=40,wrap=WORD)
self.feedback.place(x=0,y=0)
self.feedback_scrollbar = Scrollbar(self.feedback)
self.feedback_scrollbar.place(x=785,y=0,height=640)
self.feedback.config(yscrollcommand=self.feedback_scrollbar.set)
self.feedback_scrollbar.config(command=self.feedback.yview)
Label(self,text="Command:", width=10).place(y=650,x=0)
self.command = Entry(self, width=120)
self.command.bind('<Return>',Control.process_input)
self.command.place(y=650,x=80)
Label(self,text="Server version:", width=20, anchor=W).place(y=0,x=810)
self.server_version = Label(self,text="[Unknown]", width=20, anchor=W, relief=GROOVE)
self.server_version.place(y=0,x=960)
Label(self,text="Server address:", width=20, anchor=W).place(y=25,x=810)
self.server_info = Label(self,text=Config.rcon_host, width=20, anchor=W, relief=GROOVE)
self.server_info.place(y=25,x=960)
Label(self,text="Config file:", width=20, anchor=W).place(y=50,x=810)
self.config_file = Label(self,text=Config.filename, width=20, anchor=W, relief=GROOVE)
self.config_file.place(y=50,x=960)
Label(self,text="Last keepalive:", width=20, anchor=W).place(y=75,x=810)
self.last_keepalive = Label(self,text="Never", width=20, anchor=W, relief=GROOVE)
self.last_keepalive.place(y=75,x=960)
Label(self,text="Last server response:", width=20, anchor=W).place(y=100,x=810)
self.last_serverresponse = Label(self,text="Never", width=20, anchor=W, relief=GROOVE)
self.last_serverresponse.place(y=100,x=960)
Label(self,text="Last player activity:", width=20, anchor=W).place(y=125,x=810)
self.last_player_activity = Label(self,text="Never", width=20, anchor=W, relief=GROOVE)
self.last_player_activity.place(y=125,x=960)
Label(self,text="Active threads:", width=20, anchor=W).place(y=150,x=810)
self.active_threads = Label(self,text="", width=20, anchor=W, relief=GROOVE)
self.active_threads.place(y=150,x=960)
Label(self,text="List of players:").place(y=400,x=810)
self.player_list = Listbox(self, relief=SUNKEN, height=10, width=40)
self.player_list.insert(END,'[Not available]')
self.player_list.place(y=425,x=810)
Button(text='Restart Now',command=self.ev_restart_now, bg='#666', fg="#EEE").place(y=600,x=810)
Button(text='Restart 60min',command=lambda:self.ev_restart_min(60), bg='#666', fg="#EEE").place(y=600,x=900)
Button(text='Restart 30min',command=lambda:self.ev_restart_min(30), bg='#666', fg="#EEE").place(y=600,x=990)
Button(text='Restart 10min',command=lambda:self.ev_restart_min(10), bg='#666', fg="#EEE").place(y=600,x=1080)
def write(self,message):
self.feedback.insert(END,message)
self.feedback.see('end')
def log(self,message):
self.feedback.insert(END,message + "\n")
self.feedback.see('end')
def is_online(self):
#self.log('Not connected to RCON')
return True
def ev_restart_min(self,minutes):
if not self.is_online():
return False
from ark.rcon import Rcon
Rcon.delayed_restart(minutes)
def ev_restart_now(self):
if not self.is_online():
return False
self.log('Restart button pressed')
from ark.rcon import Rcon
Rcon.callback_restart()
|
the-stack_0_26995
|
import asyncio
import pytest
import time
from greenberry.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from greenberry.protocols.full_node_protocol import RespondBlock
from greenberry.server.server import GreenBerryServer
from greenberry.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from greenberry.types.peer_info import PeerInfo
from greenberry.util.ints import uint16, uint32, uint64
from greenberry.wallet.util.transaction_type import TransactionType
from greenberry.wallet.transaction_record import TransactionRecord
from greenberry.wallet.wallet_node import WalletNode
from greenberry.wallet.wallet_state_manager import WalletStateManager
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert, time_out_assert_not_none
from tests.wallet.cc_wallet.test_cc_wallet import tx_in_pool
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestWalletSimulator:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes_five_freeze(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_sim_two_wallets(self):
async for _ in setup_simulators_and_wallets(3, 2, {}):
yield _
@pytest.mark.asyncio
async def test_wallet_coinbase(self, wallet_node):
num_blocks = 10
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
server_1: GreenBerryServer = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_block(FarmNewBlockProtocol(ph))
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 2)
]
)
async def check_tx_are_pool_farm_rewards():
wsm: WalletStateManager = wallet_node.wallet_state_manager
all_txs = await wsm.get_all_transactions(1)
expected_count = (num_blocks + 1) * 2
if len(all_txs) != expected_count:
return False
pool_rewards = 0
farm_rewards = 0
for tx in all_txs:
if tx.type == TransactionType.COINBASE_REWARD:
pool_rewards += 1
elif tx.type == TransactionType.FEE_REWARD:
farm_rewards += 1
if pool_rewards != expected_count / 2:
return False
if farm_rewards != expected_count / 2:
return False
return True
await time_out_assert(10, check_tx_are_pool_farm_rewards, True)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_wallet_make_transaction(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
server_1 = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
tx = await wallet.generate_signed_transaction(
10,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
0,
)
await wallet.push_transaction(tx)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - 10)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
new_funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, (2 * num_blocks))
]
)
await time_out_assert(5, wallet.get_confirmed_balance, new_funds - 10)
await time_out_assert(5, wallet.get_unconfirmed_balance, new_funds - 10)
@pytest.mark.asyncio
async def test_wallet_coinbase_reorg(self, wallet_node):
num_blocks = 5
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
fn_server = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(uint32(2), uint32(num_blocks + 6), 32 * b"0"))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 2)
]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_wallet_send_to_three_peers(self, three_sim_two_wallets):
num_blocks = 10
full_nodes, wallets = three_sim_two_wallets
wallet_0, wallet_server_0 = wallets[0]
full_node_api_0 = full_nodes[0]
full_node_api_1 = full_nodes[1]
full_node_api_2 = full_nodes[2]
full_node_0 = full_node_api_0.full_node
full_node_1 = full_node_api_1.full_node
full_node_2 = full_node_api_2.full_node
server_0 = full_node_0.server
server_1 = full_node_1.server
server_2 = full_node_2.server
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
        # wallet0 <-> server0
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
for i in range(0, num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
all_blocks = await full_node_api_0.get_all_full_blocks()
for block in all_blocks:
await full_node_1.respond_block(RespondBlock(block))
await full_node_2.respond_block(RespondBlock(block))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, 32 * b"0", 0)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await time_out_assert_not_none(5, full_node_0.mempool_manager.get_spendbundle, tx.spend_bundle.name())
        # wallet0 <-> server1
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), wallet_0.on_connect)
await time_out_assert_not_none(5, full_node_1.mempool_manager.get_spendbundle, tx.spend_bundle.name())
        # wallet0 <-> server2
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_2._port)), wallet_0.on_connect)
await time_out_assert_not_none(5, full_node_2.mempool_manager.get_spendbundle, tx.spend_bundle.name())
@pytest.mark.asyncio
async def test_wallet_make_transaction_hop(self, two_wallet_nodes_five_freeze):
num_blocks = 10
full_nodes, wallets = two_wallet_nodes_five_freeze
full_node_api_0 = full_nodes[0]
full_node_0 = full_node_api_0.full_node
server_0 = full_node_0.server
wallet_node_0, wallet_0_server = wallets[0]
wallet_node_1, wallet_1_server = wallets[1]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
await wallet_0_server.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
await wallet_1_server.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
for i in range(0, num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet_0.get_confirmed_balance, funds)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, funds)
assert await wallet_0.get_confirmed_balance() == funds
assert await wallet_0.get_unconfirmed_balance() == funds
tx = await wallet_0.generate_signed_transaction(
10,
await wallet_node_1.wallet_state_manager.main_wallet.get_new_puzzlehash(),
0,
)
await wallet_0.push_transaction(tx)
# Full node height 11, wallet height 9
await time_out_assert(5, wallet_0.get_confirmed_balance, funds)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, funds - 10)
for i in range(0, 4):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
# here it's num_blocks + 1 because our last reward is included in the first block that we just farmed
new_funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
# Full node height 17, wallet height 15
await time_out_assert(5, wallet_0.get_confirmed_balance, new_funds - 10)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, new_funds - 10)
await time_out_assert(5, wallet_1.get_confirmed_balance, 10)
tx = await wallet_1.generate_signed_transaction(5, await wallet_0.get_new_puzzlehash(), 0)
await wallet_1.push_transaction(tx)
for i in range(0, 4):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await wallet_0.get_confirmed_balance()
await wallet_0.get_unconfirmed_balance()
await wallet_1.get_confirmed_balance()
await time_out_assert(5, wallet_0.get_confirmed_balance, new_funds - 5)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, new_funds - 5)
await time_out_assert(5, wallet_1.get_confirmed_balance, 5)
# @pytest.mark.asyncio
# async def test_wallet_finds_full_node(self):
# node_iters = [
# setup_full_node(
# test_constants,
# "blockchain_test.db",
# 11234,
# introducer_port=11236,
# simulator=False,
# ),
# setup_wallet_node(
# 11235,
# test_constants,
# None,
# introducer_port=11236,
# ),
# setup_introducer(11236),
# ]
#
# full_node_api = await node_iters[0].__anext__()
# wallet, wallet_server = await node_iters[1].__anext__()
# introducer, introducer_server = await node_iters[2].__anext__()
#
# async def has_full_node():
# outbound: List[WSGreenBerryConnection] = wallet.server.get_outgoing_connections()
# for connection in outbound:
# if connection.connection_type is NodeType.FULL_NODE:
# return True
# return False
#
# await time_out_assert(
# 2 * 60,
# has_full_node,
# True,
# )
# await _teardown_nodes(node_iters)
@pytest.mark.asyncio
async def test_wallet_make_transaction_with_fee(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_1.full_node.server._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
assert await wallet.get_confirmed_balance() == funds
assert await wallet.get_unconfirmed_balance() == funds
tx_amount = 3200000000000
tx_fee = 10
tx = await wallet.generate_signed_transaction(
tx_amount,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
tx_fee,
)
fees = tx.spend_bundle.fees()
assert fees == tx_fee
await wallet.push_transaction(tx)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - tx_amount - tx_fee)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
new_funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
await time_out_assert(5, wallet.get_confirmed_balance, new_funds - tx_amount - tx_fee)
await time_out_assert(5, wallet.get_unconfirmed_balance, new_funds - tx_amount - tx_fee)
@pytest.mark.asyncio
async def test_wallet_create_hit_max_send_amount(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_1.full_node.server._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
primaries = []
for i in range(0, 600):
primaries.append({"puzzlehash": ph, "amount": 100000000 + i})
tx_split_coins = await wallet.generate_signed_transaction(1, ph, 0, primaries=primaries)
await wallet.push_transaction(tx_split_coins)
await time_out_assert(
15, tx_in_pool, True, full_node_1.full_node.mempool_manager, tx_split_coins.spend_bundle.name()
)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
await time_out_assert(90, wallet.get_confirmed_balance, funds)
max_sent_amount = await wallet.get_max_send_amount()
# 1) Generate transaction that is under the limit
under_limit_tx = None
try:
under_limit_tx = await wallet.generate_signed_transaction(
max_sent_amount - 1,
ph,
0,
)
except ValueError:
assert ValueError
assert under_limit_tx is not None
# 2) Generate transaction that is equal to limit
at_limit_tx = None
try:
at_limit_tx = await wallet.generate_signed_transaction(
max_sent_amount,
ph,
0,
)
except ValueError:
assert ValueError
assert at_limit_tx is not None
# 3) Generate transaction that is greater than limit
above_limit_tx = None
try:
above_limit_tx = await wallet.generate_signed_transaction(
max_sent_amount + 1,
ph,
0,
)
except ValueError:
pass
assert above_limit_tx is None
@pytest.mark.asyncio
async def test_wallet_prevent_fee_theft(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_1.full_node.server._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
assert await wallet.get_confirmed_balance() == funds
assert await wallet.get_unconfirmed_balance() == funds
tx_amount = 3200000000000
tx_fee = 300000000000
tx = await wallet.generate_signed_transaction(
tx_amount,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
tx_fee,
)
# extract coin_solution from generated spend_bundle
for cs in tx.spend_bundle.coin_solutions:
if cs.additions() == []:
stolen_cs = cs
# get a legit signature
stolen_sb = await wallet.sign_transaction([stolen_cs])
now = uint64(int(time.time()))
add_list = list(stolen_sb.additions())
rem_list = list(stolen_sb.removals())
name = stolen_sb.name()
stolen_tx = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=32 * b"0",
amount=0,
fee_amount=stolen_cs.coin.amount,
confirmed=False,
sent=uint32(0),
spend_bundle=stolen_sb,
additions=add_list,
removals=rem_list,
wallet_id=wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=name,
)
await wallet.push_transaction(stolen_tx)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - stolen_cs.coin.amount)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
# Funds have not decreased because stolen_tx was rejected
outstanding_coinbase_rewards = 2000000000000
await time_out_assert(5, wallet.get_confirmed_balance, funds + outstanding_coinbase_rewards)
await time_out_assert(5, wallet.get_confirmed_balance, funds + outstanding_coinbase_rewards)
@pytest.mark.asyncio
async def test_wallet_tx_reorg(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
fn_server = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet_node: WalletNode = wallet_node
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
ph2 = await wallet_2.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
await server_3.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
# Waits a few seconds to receive rewards
all_blocks = await full_node_api.get_all_full_blocks()
# Ensure that we use a coin that we will not reorg out
coin = list(all_blocks[-3].get_included_reward_coins())[0]
await asyncio.sleep(5)
tx = await wallet.generate_signed_transaction(1000, ph2, coins={coin})
await wallet.push_transaction(tx)
await full_node_api.full_node.respond_transaction(tx.spend_bundle, tx.name)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
for i in range(0, 2):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(5, wallet_2.get_confirmed_balance, 1000)
await time_out_assert(5, wallet_node.wallet_state_manager.blockchain.get_peak_height, 7)
peak_height = full_node_api.full_node.blockchain.get_peak().height
print(peak_height)
# Perform a reorg, which will revert the transaction in the full node and wallet, and cause wallet to resubmit
await full_node_api.reorg_from_index_to_new_index(
ReorgProtocol(uint32(peak_height - 3), uint32(peak_height + 3), 32 * b"0")
)
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, peak_height - 2)
]
)
await time_out_assert(7, full_node_api.full_node.blockchain.get_peak_height, peak_height + 3)
await time_out_assert(7, wallet_node.wallet_state_manager.blockchain.get_peak_height, peak_height + 3)
# Farm a few blocks so we can confirm the resubmitted transaction
for i in range(0, num_blocks):
await asyncio.sleep(1)
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
# By this point, the transaction should be confirmed
print(await wallet.get_confirmed_balance())
await time_out_assert(15, wallet.get_confirmed_balance, funds - 1000)
unconfirmed = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(int(wallet.id()))
assert len(unconfirmed) == 0
tx_record = await wallet_node.wallet_state_manager.tx_store.get_transaction_record(tx.name)
removed = tx_record.removals[0]
added = tx_record.additions[0]
added_1 = tx_record.additions[1]
wallet_coin_record_rem = await wallet_node.wallet_state_manager.coin_store.get_coin_record(removed.name())
assert wallet_coin_record_rem.spent
coin_record_full_node = await full_node_api.full_node.coin_store.get_coin_record(removed.name())
assert coin_record_full_node.spent
add_1_coin_record_full_node = await full_node_api.full_node.coin_store.get_coin_record(added.name())
assert add_1_coin_record_full_node is not None
assert add_1_coin_record_full_node.confirmed_block_index > 0
add_2_coin_record_full_node = await full_node_api.full_node.coin_store.get_coin_record(added_1.name())
assert add_2_coin_record_full_node is not None
assert add_2_coin_record_full_node.confirmed_block_index > 0
|
the-stack_0_26996
|
import copy
import enum
import logging
import time
from typing import Tuple
import cv2
from matplotlib import pyplot as plt
from data_labeling import labeling_config
from data_labeling.frame_annotation import FrameAnnotation
from data_labeling.labeling_utils import get_extrapolated_ir_frame_heatmap_flipped
class KeyAction(enum.Enum):
UNKNOWN = None
a_ACCEPT = ord('a')
d_DISCARD = ord('d')
e_PREVIOUS = ord('e')
r_NEXT = ord('r')
c_CLEAR = ord('c')
z_RECTANGLE = ord('z')
x_POINT = ord('x')
n_AUTOMOVE_TO_NEXT_FRAME = ord('n')
q_QUIT = ord('q')
@classmethod
def from_pressed_key(cls, key):
try:
return cls(key)
except:
return KeyAction.UNKNOWN
def as_char(self):
return chr(self.value)
@classmethod
def get_help_message(cls):
return f'Key controls:\n' \
f' {KeyAction.a_ACCEPT.as_char()}) accept annotation and move to the next one\n' \
               f' {KeyAction.d_DISCARD.as_char()}) discard annotation on this frame and move to the next one (this frame will be marked as ignored)\n' \
               f' {KeyAction.e_PREVIOUS.as_char()}) just move to the previous frame\n' \
               f' {KeyAction.r_NEXT.as_char()}) just move to the next frame\n' \
f' {KeyAction.c_CLEAR.as_char()}) clear annotation on this frame\n' \
f' {KeyAction.z_RECTANGLE.as_char()}) annotate person with a rectangle\n' \
f' {KeyAction.x_POINT.as_char()}) annotate person centre with single point\n' \
f' {KeyAction.n_AUTOMOVE_TO_NEXT_FRAME.as_char()}) enable/disable automatic move to the next frame after button release\n' \
f' {KeyAction.q_QUIT.as_char()}) quit annotating'
class DrawingMode(enum.Enum):
RECTANGLE = KeyAction.z_RECTANGLE.value
SINGLE_POINT = KeyAction.x_POINT.value
class SingleFrameAnnotator:
def __init__(self, ir_frame, rgb_frame, drawing_mode: DrawingMode, initial_annotations: FrameAnnotation = None,
automove_to_next_frame_after_mouse_released=False):
self.ir_frame_interpolated = get_extrapolated_ir_frame_heatmap_flipped(
frame_2d=ir_frame,
multiplier=labeling_config.IR_FRAME_RESIZE_MULTIPLIER,
interpolation=labeling_config.IR_FRAME_INTERPOLATION_METHOD,
min_temp=labeling_config.MIN_TEMPERATURE_ON_PLOT,
max_temp=labeling_config.MAX_TEMPERATURE_ON_PLOT,
colormap=plt.get_cmap('inferno'))
self.ir_frame_pixel_resized = get_extrapolated_ir_frame_heatmap_flipped(
frame_2d=ir_frame,
multiplier=labeling_config.IR_FRAME_RESIZE_MULTIPLIER,
interpolation=cv2.INTER_NEAREST_EXACT,
min_temp=None,
max_temp=None,
colormap=plt.get_cmap('viridis'))
new_rgb_frame_size = (labeling_config.RGB_FRAME_RESIZE_MULTIPLIER * rgb_frame.shape[1],
labeling_config.RGB_FRAME_RESIZE_MULTIPLIER * rgb_frame.shape[0])
self.rgb_frame_resized = cv2.resize(src=rgb_frame, dsize=new_rgb_frame_size, interpolation=cv2.INTER_LINEAR)
self.initial_annotations = initial_annotations
self.new_annotations = copy.deepcopy(self.initial_annotations or FrameAnnotation())
self._button_press_location = None
self._last_mouse_location = None
self._button_pressed_and_released = False
        self.automove_to_next_frame_after_mouse_released = automove_to_next_frame_after_mouse_released  # for mode when we move to the next frame after annotating one point
self.drawing_mode = drawing_mode
def _draw_frame(self):
ir_frame_interpolated_with_annotation = copy.copy(self.ir_frame_interpolated)
ir_frame_pixel_resized_with_annotation = copy.copy(self.ir_frame_pixel_resized)
self.add_annotations_on_img(ir_frame_interpolated_with_annotation)
self.add_annotations_on_img(ir_frame_pixel_resized_with_annotation)
window_is_shown = False
try:
window_is_shown = (cv2.getWindowProperty("rgb_frame", 0) != -1.0)
except:
            pass  # window is not shown
cv2.imshow('rgb_frame', self.rgb_frame_resized)
cv2.imshow('ir_frame_interpolated', ir_frame_interpolated_with_annotation)
cv2.imshow('ir_frame_pixel_resized', ir_frame_pixel_resized_with_annotation)
if not window_is_shown:
cv2.moveWindow("rgb_frame", 10, 10)
cv2.moveWindow("ir_frame_interpolated", 610, 10)
cv2.moveWindow("ir_frame_pixel_resized", 610, 470)
def get_annotation_for_frame(self) -> Tuple[KeyAction, FrameAnnotation]:
self._draw_frame()
cv2.setMouseCallback('ir_frame_interpolated', self._mouse_event)
cv2.setMouseCallback('ir_frame_pixel_resized', self._mouse_event)
key_action = KeyAction.UNKNOWN
while key_action not in [KeyAction.a_ACCEPT, KeyAction.d_DISCARD, KeyAction.e_PREVIOUS,
KeyAction.r_NEXT, KeyAction.q_QUIT]:
key = cv2.waitKey(100)
if key == -1:
if self.automove_to_next_frame_after_mouse_released and self._button_pressed_and_released:
key_action = KeyAction.r_NEXT # simulate action of moving to the next frame
else:
continue
else:
key_action = KeyAction.from_pressed_key(key)
logging.info(f"Key pressed action: {key_action.name}")
if key_action == KeyAction.x_POINT:
self.drawing_mode = DrawingMode.SINGLE_POINT
if key_action == KeyAction.z_RECTANGLE:
self.drawing_mode = DrawingMode.RECTANGLE
if key_action == KeyAction.n_AUTOMOVE_TO_NEXT_FRAME:
self.automove_to_next_frame_after_mouse_released = not self.automove_to_next_frame_after_mouse_released
logging.info(f"automove_to_next_frame_after_mouse_released state set to {self.automove_to_next_frame_after_mouse_released}")
if key_action == KeyAction.c_CLEAR:
self.new_annotations = FrameAnnotation()
self._draw_frame()
if key_action == KeyAction.a_ACCEPT:
self.new_annotations.accepted = True
self.new_annotations.discarded = False
key_action = KeyAction.r_NEXT
if key_action == KeyAction.d_DISCARD:
self.new_annotations.discarded = True
self.new_annotations.accepted = False
key_action = KeyAction.r_NEXT
return key_action, self.new_annotations
def _mouse_event(self, event, x, y, flags, param):
redraw = False
self._last_mouse_location = x, y
if event == cv2.EVENT_LBUTTONDOWN:
self._button_press_location = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if self._button_press_location is not None:
redraw = True
elif event == cv2.EVENT_LBUTTONUP:
self._add_annotation(button_release_location=(x, y))
self._button_press_location = None
redraw = True
self._button_pressed_and_released = True
if redraw:
self._draw_frame()
def _add_annotation(self, button_release_location):
drawing_mode = self.drawing_mode
button_press_location = self._button_press_location
logging.info(f"Adding annotation at {button_press_location}, mode {drawing_mode.name}")
if drawing_mode == DrawingMode.SINGLE_POINT:
self.new_annotations.centre_points.append(button_release_location)
elif drawing_mode == DrawingMode.RECTANGLE:
if not button_press_location:
logging.warning("No press location!")
return
self.new_annotations.rectangles.append((button_press_location, button_release_location))
def add_annotations_on_img(self, img):
if self.new_annotations.discarded:
color = (33, 33, 33)
elif self.new_annotations.accepted:
color = (0, 120, 60)
else:
color = (200, 100, 200)
for centre_point in self.new_annotations.centre_points:
cv2.circle(img=img, center=centre_point, color=color, radius=4, thickness=3)
for top_left, bottom_right in self.new_annotations.rectangles:
cv2.rectangle(img=img, pt1=top_left, pt2=bottom_right, color=color, thickness=2)
# draw also annotation in progress
if self.drawing_mode == DrawingMode.RECTANGLE and self._button_press_location:
cv2.rectangle(img=img,
pt1=self._button_press_location, pt2=self._last_mouse_location,
color=color, thickness=3)
if self.new_annotations.discarded:
cv2.line(img=img, pt1=(0,0), pt2=img.shape[:2][::-1], color=color, thickness=3)
cv2.line(img=img, pt1=(img.shape[1], 0), pt2=(0, img.shape[0]), color=color, thickness=3)
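# A hedged sketch (the frame iterator and loop bookkeeping are assumptions, not part
# of this module) of the per-frame labeling loop this class is designed for:
#
#   annotation = None
#   for ir_frame, rgb_frame in frames:        # hypothetical source of paired frames
#       annotator = SingleFrameAnnotator(ir_frame, rgb_frame,
#                                        DrawingMode.SINGLE_POINT,
#                                        initial_annotations=annotation)
#       action, annotation = annotator.get_annotation_for_frame()
#       if action == KeyAction.q_QUIT:
#           break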
|
the-stack_0_26997
|
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from scipy.sparse.linalg import eigsh
from scipy.special import expit
import pytest
from sklearn.utils import gen_batches
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils.extmath import density, _safe_accumulator_op
from sklearn.utils.extmath import randomized_svd, _randomized_eigsh
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.datasets import make_low_rank_matrix, make_sparse_spd_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert density(X_) == density(X)
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis=axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def check_randomized_svd_low_rank(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0).astype(dtype, copy=False)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
U, s, Vt = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
Vt = Vt.astype(dtype, copy=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert Ua.shape == (n_samples, k)
assert sa.shape == (k,)
assert Va.shape == (k, n_features)
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], Vt[:k, :]), np.dot(Ua, Va),
decimal=decimal)
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == 'f'
assert sa.dtype.kind == 'f'
assert Va.dtype.kind == 'f'
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize('dtype',
(np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
check_randomized_svd_low_rank(dtype)
@pytest.mark.parametrize('dtype',
(np.int32, np.int64, np.float32, np.float64))
def test_randomized_eigsh(dtype):
"""Test that `_randomized_eigsh` returns the appropriate components"""
rng = np.random.RandomState(42)
X = np.diag(np.array([1., -2., 0., 3.], dtype=dtype))
# random rotation that preserves the eigenvalues of X
rand_rot = np.linalg.qr(rng.normal(size=X.shape))[0]
X = rand_rot @ X @ rand_rot.T
# with 'module' selection method, the negative eigenvalue shows up
eigvals, eigvecs = _randomized_eigsh(X, n_components=2, selection='module')
# eigenvalues
assert eigvals.shape == (2,)
assert_array_almost_equal(eigvals, [3., -2.]) # negative eigenvalue here
# eigenvectors
assert eigvecs.shape == (4, 2)
# with 'value' selection method, the negative eigenvalue does not show up
with pytest.raises(NotImplementedError):
_randomized_eigsh(X, n_components=2, selection='value')
@pytest.mark.parametrize('k', (10, 50, 100, 199, 200))
def test_randomized_eigsh_compared_to_others(k):
"""Check that `_randomized_eigsh` is similar to other `eigsh`
Tests that for a random PSD matrix, `_randomized_eigsh` provides results
comparable to LAPACK (scipy.linalg.eigh) and ARPACK
(scipy.sparse.linalg.eigsh).
Note: some versions of ARPACK do not support k=n_features.
"""
# make a random PSD matrix
n_features = 200
X = make_sparse_spd_matrix(n_features, random_state=0)
# compare two versions of randomized
# rough and fast
eigvals, eigvecs = _randomized_eigsh(X, n_components=k, selection='module',
n_iter=25, random_state=0)
# more accurate but slow (TODO find realistic settings here)
eigvals_qr, eigvecs_qr = _randomized_eigsh(
X, n_components=k, n_iter=25, n_oversamples=20, random_state=0,
power_iteration_normalizer="QR", selection='module'
)
# with LAPACK
eigvals_lapack, eigvecs_lapack = linalg.eigh(X, eigvals=(n_features - k,
n_features - 1))
indices = eigvals_lapack.argsort()[::-1]
eigvals_lapack = eigvals_lapack[indices]
eigvecs_lapack = eigvecs_lapack[:, indices]
# -- eigenvalues comparison
assert eigvals_lapack.shape == (k,)
# comparison precision
assert_array_almost_equal(eigvals, eigvals_lapack, decimal=6)
assert_array_almost_equal(eigvals_qr, eigvals_lapack, decimal=6)
# -- eigenvectors comparison
assert eigvecs_lapack.shape == (n_features, k)
# flip eigenvectors' sign to enforce deterministic output
dummy_vecs = np.zeros_like(eigvecs).T
eigvecs, _ = svd_flip(eigvecs, dummy_vecs)
eigvecs_qr, _ = svd_flip(eigvecs_qr, dummy_vecs)
eigvecs_lapack, _ = svd_flip(eigvecs_lapack, dummy_vecs)
assert_array_almost_equal(eigvecs, eigvecs_lapack, decimal=4)
assert_array_almost_equal(eigvecs_qr, eigvecs_lapack, decimal=6)
# comparison ARPACK ~ LAPACK (some ARPACK implems do not support k=n)
if k < n_features:
v0 = _init_arpack_v0(n_features, random_state=0)
# "LA" largest algebraic <=> selection="value" in randomized_eigsh
eigvals_arpack, eigvecs_arpack = eigsh(X, k, which="LA", tol=0,
maxiter=None, v0=v0)
indices = eigvals_arpack.argsort()[::-1]
# eigenvalues
eigvals_arpack = eigvals_arpack[indices]
assert_array_almost_equal(eigvals_lapack, eigvals_arpack, decimal=10)
# eigenvectors
eigvecs_arpack = eigvecs_arpack[:, indices]
eigvecs_arpack, _ = svd_flip(eigvecs_arpack, dummy_vecs)
assert_array_almost_equal(eigvecs_arpack, eigvecs_lapack, decimal=8)
@pytest.mark.parametrize("n,rank", [
(10, 7),
(100, 10),
(100, 80),
(500, 10),
(500, 250),
(500, 400),
])
def test_randomized_eigsh_reconst_low_rank(n, rank):
"""Check that randomized_eigsh is able to reconstruct a low rank psd matrix
Tests that the decomposition provided by `_randomized_eigsh` leads to
orthonormal eigenvectors, and that a low rank PSD matrix can be effectively
reconstructed with good accuracy using it.
"""
assert rank < n
# create a low rank PSD
rng = np.random.RandomState(69)
X = rng.randn(n, rank)
A = X @ X.T
# approximate A with the "right" number of components
S, V = _randomized_eigsh(A, n_components=rank, random_state=rng)
# orthonormality checks
assert_array_almost_equal(np.linalg.norm(V, axis=0), np.ones(S.shape))
assert_array_almost_equal(V.T @ V, np.diag(np.ones(S.shape)))
# reconstruction
A_reconstruct = V @ np.diag(S) @ V.T
# test that the approximation is good
assert_array_almost_equal(A_reconstruct, A, decimal=6)
@pytest.mark.parametrize('dtype',
(np.float32, np.float64))
def test_row_norms(dtype):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype, copy=False)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = sparse.csr_matrix(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype, copy=False)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype, copy=False)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.01
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.1
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert X.shape == (n_samples, n_features)
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalizer='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, Vt = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none',
random_state=0)
A = X - U.dot(np.diag(s).dot(Vt))
error_2 = linalg.norm(A, ord='fro')
U, s, Vt = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none',
random_state=0)
A = X - U.dot(np.diag(s).dot(Vt))
error_20 = linalg.norm(A, ord='fro')
assert np.abs(error_2 - error_20) > 100
for normalizer in ['LU', 'QR', 'auto']:
U, s, Vt = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(Vt))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, Vt = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(Vt))
error = linalg.norm(A, ord='fro')
assert 15 > np.abs(error_2 - error)
def test_randomized_svd_sparse_warnings():
# randomized_svd throws a warning for lil and dok matrix
rng = np.random.RandomState(42)
X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
n_components = 5
for cls in (sparse.lil_matrix, sparse.dok_matrix):
X = cls(X)
assert_warns_message(
sparse.SparseEfficiencyWarning,
"Calculating SVD of a {} is expensive. "
"csr_matrix is more efficient.".format(cls.__name__),
randomized_svd, X, n_components, n_iter=1,
power_iteration_normalizer='none')
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, Vt = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, Vt, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, Vt = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, Vt, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, Vt, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, Vt, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True,
random_state=0)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert u_based
assert not v_based
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True, random_state=0)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert u_based
assert not v_based
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(expit(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
@pytest.fixture()
def rng():
return np.random.RandomState(42)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incremental_weighted_mean_and_variance_simple(rng, dtype):
mult = 10
X = rng.rand(1000, 20).astype(dtype)*mult
sample_weight = rng.rand(X.shape[0]) * mult
mean, var, _ = _incremental_mean_and_var(X, 0, 0, 0,
sample_weight=sample_weight)
expected_mean = np.average(X, weights=sample_weight, axis=0)
expected_var = np.average(X**2, weights=sample_weight, axis=0) - \
expected_mean**2
assert_almost_equal(mean, expected_mean)
assert_almost_equal(var, expected_var)
@pytest.mark.parametrize("mean", [0, 1e7, -1e7])
@pytest.mark.parametrize("var", [1, 1e-8, 1e5])
@pytest.mark.parametrize("weight_loc, weight_scale", [
(0, 1), (0, 1e-8), (1, 1e-8), (10, 1), (1e7, 1)])
def test_incremental_weighted_mean_and_variance(mean, var, weight_loc,
weight_scale, rng):
# Testing of correctness and numerical stability
def _assert(X, sample_weight, expected_mean, expected_var):
n = X.shape[0]
for chunk_size in [1, n//10 + 1, n//4 + 1, n//2 + 1, n]:
last_mean, last_weight_sum, last_var = 0, 0, 0
for batch in gen_batches(n, chunk_size):
last_mean, last_var, last_weight_sum = \
_incremental_mean_and_var(
X[batch], last_mean, last_var, last_weight_sum,
sample_weight=sample_weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-6)
size = (100, 20)
weight = rng.normal(loc=weight_loc, scale=weight_scale, size=size[0])
# Compare to weighted average: np.average
X = rng.normal(loc=mean, scale=var, size=size)
expected_mean = _safe_accumulator_op(np.average, X, weights=weight, axis=0)
expected_var = _safe_accumulator_op(
np.average, (X - expected_mean) ** 2, weights=weight, axis=0)
_assert(X, weight, expected_mean, expected_var)
# Compare to unweighted mean: np.mean
X = rng.normal(loc=mean, scale=var, size=size)
ones_weight = np.ones(size[0])
expected_mean = _safe_accumulator_op(np.mean, X, axis=0)
expected_var = _safe_accumulator_op(np.var, X, axis=0)
_assert(X, ones_weight, expected_mean, expected_var)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incremental_weighted_mean_and_variance_ignore_nan(dtype):
old_means = np.array([535., 535., 535., 535.])
old_variances = np.array([4225., 4225., 4225., 4225.])
old_weight_sum = np.array([2, 2, 2, 2], dtype=np.int32)
sample_weights_X = np.ones(3)
sample_weights_X_nan = np.ones(4)
X = np.array([[170, 170, 170, 170],
[430, 430, 430, 430],
[300, 300, 300, 300]]).astype(dtype)
X_nan = np.array([[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan]]).astype(dtype)
X_means, X_variances, X_count = \
_incremental_mean_and_var(X,
old_means,
old_variances,
old_weight_sum,
sample_weight=sample_weights_X)
X_nan_means, X_nan_variances, X_nan_count = \
_incremental_mean_and_var(X_nan,
old_means,
old_variances,
old_weight_sum,
sample_weight=sample_weights_X_nan)
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_variances, X_variances)
assert_allclose(X_nan_count, X_count)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from https://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = np.full(X1.shape[1], X1.shape[0], dtype=np.int32)
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
old_means = np.array([535., 535., 535., 535.])
old_variances = np.array([4225., 4225., 4225., 4225.])
old_sample_count = np.array([2, 2, 2, 2], dtype=np.int32)
X = np.array([[170, 170, 170, 170],
[430, 430, 430, 430],
[300, 300, 300, 300]])
X_nan = np.array([[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan]])
X_means, X_variances, X_count = _incremental_mean_and_var(
X, old_means, old_variances, old_sample_count)
X_nan_means, X_nan_variances, X_nan_count = _incremental_mean_and_var(
X_nan, old_means, old_variances, old_sample_count)
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_variances, X_variances)
assert_allclose(X_nan_count, X_count)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case where one_pass_var has error > 1e-3 while
# _incremental_mean_and_var has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
# Naive one pass var: >tol (=1063)
assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert n == A.shape[0]
# the mean is also slightly unstable
assert np.abs(A.mean(axis=0) - mean).max() > 1e-6
assert np.abs(np_var(A) - var).max() > tol
# Robust implementation: <tol (177)
mean, var = A0[0, :], np.zeros(n_features)
n = np.full(n_features, n_samples // 2, dtype=np.int32)
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_array_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert tol > np.abs(np_var(A) - var).max()
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = np.full(batch.shape[1], batch.shape[0],
dtype=np.int32)
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_array_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
@pytest.mark.parametrize("A_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("B_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
def test_safe_sparse_dot_2d(A_array_constr, B_array_constr):
rng = np.random.RandomState(0)
A = rng.random_sample((30, 10))
B = rng.random_sample((10, 20))
expected = np.dot(A, B)
A = A_array_constr(A)
B = B_array_constr(B)
actual = safe_sparse_dot(A, B, dense_output=True)
assert_allclose(actual, expected)
def test_safe_sparse_dot_nd():
rng = np.random.RandomState(0)
# dense ND / sparse
A = rng.random_sample((2, 3, 4, 5, 6))
B = rng.random_sample((6, 7))
expected = np.dot(A, B)
B = sparse.csr_matrix(B)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
# sparse / dense ND
A = rng.random_sample((2, 3))
B = rng.random_sample((4, 5, 3, 6))
expected = np.dot(A, B)
A = sparse.csr_matrix(A)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
@pytest.mark.parametrize("A_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
def test_safe_sparse_dot_2d_1d(A_array_constr):
rng = np.random.RandomState(0)
B = rng.random_sample((10))
# 2D @ 1D
A = rng.random_sample((30, 10))
expected = np.dot(A, B)
A = A_array_constr(A)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
# 1D @ 2D
A = rng.random_sample((10, 30))
expected = np.dot(B, A)
A = A_array_constr(A)
actual = safe_sparse_dot(B, A)
assert_allclose(actual, expected)
@pytest.mark.parametrize("dense_output", [True, False])
def test_safe_sparse_dot_dense_output(dense_output):
rng = np.random.RandomState(0)
A = sparse.random(30, 10, density=0.1, random_state=rng)
B = sparse.random(10, 20, density=0.1, random_state=rng)
expected = A.dot(B)
actual = safe_sparse_dot(A, B, dense_output=dense_output)
assert sparse.issparse(actual) == (not dense_output)
if dense_output:
expected = expected.toarray()
assert_allclose_dense_sparse(actual, expected)
|
the-stack_0_26998
|
# Return the options to use for a C++ library or binary build.
# Uses the ":optmode" config_setting to pick the options.
load(
"//tensorflow/core/platform:build_config_root.bzl",
"if_dynamic_kernels",
"if_static",
"register_extension_info",
"tf_additional_grpc_deps_py",
"tf_additional_xla_deps_py",
"tf_exec_properties",
"tf_gpu_tests_tags",
"tf_sycl_tests_tags",
)
load(
"//tensorflow/core/platform:rules_cc.bzl",
"cc_binary",
"cc_library",
"cc_test",
)
load(
"@local_config_tensorrt//:build_defs.bzl",
"if_tensorrt",
)
load(
"//tensorflow/core/platform/default:cuda_build_defs.bzl",
"if_cuda_is_configured",
)
load(
"@local_config_cuda//cuda:build_defs.bzl",
"cuda_library",
"if_cuda",
)
load(
"@local_config_rocm//rocm:build_defs.bzl",
"if_rocm",
"if_rocm_is_configured",
"rocm_copts",
)
load(
"//third_party/mkl:build_defs.bzl",
"if_enable_mkl",
"if_mkl",
"if_mkl_lnx_x64",
"if_mkl_ml",
"mkl_deps",
)
load(
"//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkl_v1_open_source_only",
)
load(
"//third_party/ngraph:build_defs.bzl",
"if_ngraph",
)
# Version for the shared libraries; it cannot contain rc or alpha,
# only numbers.
# Also update tensorflow/core/public/version.h
# and tensorflow/tools/pip_package/setup.py
VERSION = "2.1.0"
VERSION_MAJOR = VERSION.split(".")[0]
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
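# For example, if TensorFlow is consumed as an external repository named
# "org_tensorflow" (repository name shown for illustration),
# clean_dep("//tensorflow:windows") is expected to resolve to
# "@org_tensorflow//tensorflow:windows" rather than to the including
# project's own //tensorflow:windows.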
def if_v2(a):
return select({
clean_dep("//tensorflow:api_version_2"): a,
"//conditions:default": [],
})
def if_not_v2(a):
return select({
clean_dep("//tensorflow:api_version_2"): [],
"//conditions:default": a,
})
def if_nvcc(a):
return select({
"@local_config_cuda//cuda:using_nvcc": a,
"//conditions:default": [],
})
def if_cuda_is_configured_compat(x):
return if_cuda_is_configured(x)
def if_xla_available(if_true, if_false = []):
return select({
clean_dep("//tensorflow:with_xla_support"): if_true,
"//conditions:default": if_false,
})
# Given a source file, generate a test name.
# i.e. "common_runtime/direct_session_test.cc" becomes
# "common_runtime_direct_session_test"
def src_to_test_name(src):
return src.replace("/", "_").replace(":", "_").split(".")[0]
def full_path(relative_paths):
return [native.package_name() + "/" + relative for relative in relative_paths]
def _add_tfcore_prefix(src):
if src.startswith("//"):
return src
return "//tensorflow/core:" + src
def tf_android_core_proto_headers(core_proto_sources_relative):
"""Returns the list of pb.h and proto.h headers that are generated for the provided sources."""
return ([
_add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".pb.h")
for p in core_proto_sources_relative
] + [
_add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".proto.h")
for p in core_proto_sources_relative
])
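# For example (the proto path is illustrative),
# tf_android_core_proto_headers(["example/example.proto"]) returns
# ["//tensorflow/core/example/example.pb.h",
#  "//tensorflow/core/example/example.proto.h"].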
# Wrapper for portable protos which currently just creates an empty rule.
def tf_portable_proto_library(name, proto_deps, deps = [], **kwargs):
_ignore = [kwargs]
cc_library(name = name, deps = deps + [dep + "_cc" for dep in proto_deps])
def tf_portable_full_lite_protos(full, lite):
return select({
"//tensorflow:mobile_lite_protos": lite,
"//tensorflow:mobile_full_protos": full,
# The default should probably be lite runtime, but since most clients
# seem to use the non-lite version, let's make that the default for now.
"//conditions:default": full,
})
def if_android_x86(a):
return select({
clean_dep("//tensorflow:android_x86"): a,
clean_dep("//tensorflow:android_x86_64"): a,
"//conditions:default": [],
})
def if_android_arm(a):
return select({
clean_dep("//tensorflow:android_arm"): a,
"//conditions:default": [],
})
def if_android_arm64(a):
return select({
clean_dep("//tensorflow:android_arm64"): a,
"//conditions:default": [],
})
def if_android_mips(a):
return select({
clean_dep("//tensorflow:android_mips"): a,
"//conditions:default": [],
})
def if_not_android(a):
return select({
clean_dep("//tensorflow:android"): [],
"//conditions:default": a,
})
def if_not_android_mips_and_mips64(a):
return select({
clean_dep("//tensorflow:android_mips"): [],
clean_dep("//tensorflow:android_mips64"): [],
"//conditions:default": a,
})
def if_android(a):
return select({
clean_dep("//tensorflow:android"): a,
"//conditions:default": [],
})
def if_emscripten(a):
return select({
clean_dep("//tensorflow:emscripten"): a,
"//conditions:default": [],
})
def if_chromiumos(a, otherwise = []):
return select({
clean_dep("//tensorflow:chromiumos"): a,
"//conditions:default": otherwise,
})
def if_macos(a, otherwise = []):
return select({
clean_dep("//tensorflow:macos"): a,
"//conditions:default": otherwise,
})
def if_ios(a):
return select({
clean_dep("//tensorflow:ios"): a,
"//conditions:default": [],
})
def if_ios_x86_64(a):
return select({
clean_dep("//tensorflow:ios_x86_64"): a,
"//conditions:default": [],
})
def if_mobile(a):
return select({
clean_dep("//tensorflow:mobile"): a,
"//conditions:default": [],
})
def if_not_mobile(a):
return select({
clean_dep("//tensorflow:mobile"): [],
"//conditions:default": a,
})
# Config setting selector used when building for products
# which require restricted licenses to be avoided.
def if_not_lgpl_restricted(a):
_ = (a,)
return select({
"//conditions:default": [],
})
def if_not_windows(a):
return select({
clean_dep("//tensorflow:windows"): [],
"//conditions:default": a,
})
def if_windows(a, otherwise = []):
return select({
clean_dep("//tensorflow:windows"): a,
"//conditions:default": otherwise,
})
def if_windows_cuda(a, otherwise = []):
return select({
clean_dep("//tensorflow:with_cuda_support_windows_override"): a,
"//conditions:default": otherwise,
})
def if_linux_x86_64(a):
return select({
clean_dep("//tensorflow:linux_x86_64"): a,
"//conditions:default": [],
})
def if_override_eigen_strong_inline(a):
return select({
clean_dep("//tensorflow:override_eigen_strong_inline"): a,
"//conditions:default": [],
})
def if_nccl(if_true, if_false = []):
return select({
"//tensorflow:no_nccl_support": if_false,
"//tensorflow:windows": if_false,
"//conditions:default": if_true,
})
def get_win_copts(is_external = False):
WINDOWS_COPTS = [
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
"/DTENSORFLOW_USE_EIGEN_THREADPOOL",
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018", # -Wno-sign-compare
# Bazel's CROSSTOOL currently passes /EHsc to enable exceptions by
# default. We can't pass /EHs-c- to disable exceptions, otherwise
# we will get a waterfall of flag conflict warnings. Wait for
# Bazel to fix this.
# "/D_HAS_EXCEPTIONS=0",
# "/EHs-c-",
"/wd4577",
"/DNOGDI",
# Also see build:windows lines in tensorflow/opensource_only/.bazelrc
# where we set some other options globally.
]
if is_external:
return WINDOWS_COPTS + ["/UTF_COMPILE_LIBRARY"]
else:
return WINDOWS_COPTS + ["/DTF_COMPILE_LIBRARY"]
def tf_copts(
android_optimization_level_override = "-O2",
is_external = False,
allow_exceptions = False):
# For compatibility reasons, android_optimization_level_override
# is currently only being set for Android.
# To clear this value, and allow the CROSSTOOL default
# to be used, pass android_optimization_level_override=None
android_copts = [
"-DTF_LEAN_BINARY",
"-Wno-narrowing",
"-fomit-frame-pointer",
]
if android_optimization_level_override:
android_copts.append(android_optimization_level_override)
return (
if_not_windows([
"-DEIGEN_AVOID_STL_ARRAY",
"-Iexternal/gemmlowp",
"-Wno-sign-compare",
"-ftemplate-depth=900",
]) +
(if_not_windows(["-fno-exceptions"]) if not allow_exceptions else []) +
if_cuda(["-DGOOGLE_CUDA=1"]) +
if_nvcc(["-DTENSORFLOW_USE_NVCC=1"]) +
if_xla_available(["-DTENSORFLOW_USE_XLA=1"]) +
if_tensorrt(["-DGOOGLE_TENSORRT=1"]) +
if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"]) +
if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"]) +
if_mkl_v1_open_source_only(["-DENABLE_MKLDNN_V1"]) +
if_enable_mkl(["-DENABLE_MKL"]) +
if_ngraph(["-DINTEL_NGRAPH=1"]) +
if_android_arm(["-mfpu=neon"]) +
if_linux_x86_64(["-msse3"]) +
if_ios_x86_64(["-msse4.1"]) +
select({
clean_dep("//tensorflow:framework_shared_object"): [],
"//conditions:default": ["-DTENSORFLOW_MONOLITHIC_BUILD"],
}) +
select({
clean_dep("//tensorflow:android"): android_copts,
clean_dep("//tensorflow:macos"): [],
clean_dep("//tensorflow:windows"): get_win_copts(is_external),
clean_dep("//tensorflow:ios"): [],
clean_dep("//tensorflow:no_lgpl_deps"): ["-D__TENSORFLOW_NO_LGPL_DEPS__", "-pthread"],
"//conditions:default": ["-pthread"],
})
)
def tf_openmp_copts():
return if_mkl_lnx_x64(["-fopenmp"])
def tfe_xla_copts():
return select({
"//tensorflow:with_xla_support": ["-DTENSORFLOW_EAGER_USE_XLA"],
"//conditions:default": [],
})
def tf_opts_nortti():
return [
"-fno-rtti",
"-DGOOGLE_PROTOBUF_NO_RTTI",
"-DGOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
]
def tf_opts_nortti_if_android():
return if_android(tf_opts_nortti())
def tf_opts_nortti_if_mobile():
return if_mobile(tf_opts_nortti())
def tf_defines_nortti():
return [
"GOOGLE_PROTOBUF_NO_RTTI",
"GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
]
def tf_defines_nortti_if_android():
return if_android(tf_defines_nortti())
def tf_features_nomodules_if_android():
return if_android(["-use_header_modules"])
def tf_features_nomodules_if_mobile():
return if_mobile(["-use_header_modules"])
def tf_opts_nortti_if_lite_protos():
return tf_portable_full_lite_protos(
full = [],
lite = tf_opts_nortti(),
)
def tf_defines_nortti_if_lite_protos():
return tf_portable_full_lite_protos(
full = [],
lite = tf_defines_nortti(),
)
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate a library for that file.
def tf_gen_op_libs(op_lib_names, deps = None, is_external = True):
# Make library out of each op so it can also be used to generate wrappers
# for various languages.
if not deps:
deps = []
for n in op_lib_names:
cc_library(
name = n + "_op_lib",
copts = tf_copts(is_external = is_external),
srcs = ["ops/" + n + ".cc"],
deps = deps + [clean_dep("//tensorflow/core:framework")],
visibility = ["//visibility:public"],
alwayslink = 1,
linkstatic = 1,
)
def _make_search_paths(prefix, levels_to_root):
return ",".join(
[
"-rpath,%s/%s" % (prefix, "/".join([".."] * search_level))
for search_level in range(levels_to_root + 1)
],
)
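# For example, _make_search_paths("$$ORIGIN", 2) evaluates to
# "-rpath,$$ORIGIN/,-rpath,$$ORIGIN/..,-rpath,$$ORIGIN/../.."
# (one -rpath entry per directory level from the package up to the root).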
def _rpath_linkopts(name):
# Search parent directories up to the TensorFlow root directory for shared
# object dependencies, even if this op shared object is deeply nested
# (e.g. tensorflow/contrib/package:python/ops/_op_lib.so). tensorflow/ is then
# the root and tensorflow/libtensorflow_framework.so should exist when
# deployed. Other shared object dependencies (e.g. shared between contrib/
# ops) are picked up as long as they are in either the same or a parent
# directory in the tensorflow/ tree.
levels_to_root = native.package_name().count("/") + name.count("/")
return select({
clean_dep("//tensorflow:macos"): [
"-Wl,%s" % (_make_search_paths("@loader_path", levels_to_root),),
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,%s" % (_make_search_paths("$$ORIGIN", levels_to_root),),
],
})
# Bazel-generated shared objects which must be linked into TensorFlow binaries
# to define symbols from //tensorflow/core:framework and //tensorflow/core:lib.
def tf_binary_additional_srcs(fullversion = False):
if fullversion:
suffix = "." + VERSION
else:
suffix = "." + VERSION_MAJOR
return if_static(
extra_deps = [],
macos = [
clean_dep("//tensorflow:libtensorflow_framework%s.dylib" % suffix),
],
otherwise = [
clean_dep("//tensorflow:libtensorflow_framework.so%s" % suffix),
],
)
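# For example, with VERSION = "2.1.0" a non-static Linux build is expected to
# link against "//tensorflow:libtensorflow_framework.so.2" (or ".so.2.1.0" when
# fullversion = True); macOS uses the matching .dylib, and fully static builds
# add nothing here.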
def tf_binary_additional_data_deps():
return if_static(
extra_deps = [],
macos = [
clean_dep("//tensorflow:libtensorflow_framework.dylib"),
clean_dep("//tensorflow:libtensorflow_framework.%s.dylib" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow_framework.%s.dylib" % VERSION),
],
otherwise = [
clean_dep("//tensorflow:libtensorflow_framework.so"),
clean_dep("//tensorflow:libtensorflow_framework.so.%s" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow_framework.so.%s" % VERSION),
],
)
def tf_binary_pybind_deps():
return select({
clean_dep("//tensorflow:macos"): [
clean_dep(
"//tensorflow/python:_pywrap_tensorflow_internal_macos",
),
],
clean_dep("//tensorflow:windows"): [
clean_dep(
"//tensorflow/python:_pywrap_tensorflow_internal_windows",
),
],
"//conditions:default": [
clean_dep(
"//tensorflow/python:_pywrap_tensorflow_internal_linux",
),
],
})
# Helper function for the per-OS tensorflow libraries and their version symlinks
def tf_shared_library_deps():
return select({
clean_dep("//tensorflow:macos_with_framework_shared_object"): [
clean_dep("//tensorflow:libtensorflow.dylib"),
clean_dep("//tensorflow:libtensorflow.%s.dylib" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow.%s.dylib" % VERSION),
],
clean_dep("//tensorflow:macos"): [],
clean_dep("//tensorflow:windows"): [
clean_dep("//tensorflow:tensorflow.dll"),
clean_dep("//tensorflow:tensorflow_dll_import_lib"),
],
clean_dep("//tensorflow:framework_shared_object"): [
clean_dep("//tensorflow:libtensorflow.so"),
clean_dep("//tensorflow:libtensorflow.so.%s" % VERSION_MAJOR),
clean_dep("//tensorflow:libtensorflow.so.%s" % VERSION),
],
"//conditions:default": [],
}) + tf_binary_additional_srcs()
# Helper functions to add kernel dependencies to tf binaries when using dynamic
# kernel linking.
def tf_binary_dynamic_kernel_dsos():
return if_dynamic_kernels(
extra_deps = [
# TODO(gunan): Remove dependencies on these, and make them load dynamically.
# "//tensorflow/core/kernels:libtfkernel_all_kernels.so",
],
otherwise = [],
)
# Helper functions to add kernel dependencies to tf binaries when using static
# kernel linking.
def tf_binary_dynamic_kernel_deps(kernels):
return if_dynamic_kernels(
extra_deps = [],
otherwise = kernels,
)
# Shared libraries have different name pattern on different platforms,
# but cc_binary cannot output correct artifact name yet,
# so we generate multiple cc_binary targets with all name patterns when necessary.
# TODO(pcloudy): Remove this workaround when https://github.com/bazelbuild/bazel/issues/4570
# is done and cc_shared_library is available.
SHARED_LIBRARY_NAME_PATTERNS = [
"lib%s.so%s", # On Linux, shared libraries are usually named as libfoo.so
"lib%s%s.dylib", # On macos, shared libraries are usually named as libfoo.dylib
"%s%s.dll", # On Windows, shared libraries are usually named as foo.dll
]
def tf_cc_shared_object(
name,
srcs = [],
deps = [],
data = [],
linkopts = [],
framework_so = tf_binary_additional_srcs(),
soversion = None,
kernels = [],
per_os_targets = False, # Generate targets with SHARED_LIBRARY_NAME_PATTERNS
visibility = None,
**kwargs):
"""Configure the shared object (.so) file for TensorFlow."""
if soversion != None:
suffix = "." + str(soversion).split(".")[0]
longsuffix = "." + str(soversion)
else:
suffix = ""
longsuffix = ""
if per_os_targets:
names = [
(
pattern % (name, ""),
pattern % (name, suffix),
pattern % (name, longsuffix),
)
for pattern in SHARED_LIBRARY_NAME_PATTERNS
]
else:
names = [(
name,
name + suffix,
name + longsuffix,
)]
for name_os, name_os_major, name_os_full in names:
# Windows DLLs can't be versioned
if name_os.endswith(".dll"):
name_os_major = name_os
name_os_full = name_os
if name_os != name_os_major:
native.genrule(
name = name_os + "_sym",
outs = [name_os],
srcs = [name_os_major],
output_to_bindir = 1,
cmd = "ln -sf $$(basename $<) $@",
)
native.genrule(
name = name_os_major + "_sym",
outs = [name_os_major],
srcs = [name_os_full],
output_to_bindir = 1,
cmd = "ln -sf $$(basename $<) $@",
)
soname = name_os_major.split("/")[-1]
data_extra = []
if framework_so != []:
data_extra = tf_binary_additional_data_deps()
cc_binary(
name = name_os_full,
srcs = srcs + framework_so,
deps = deps,
linkshared = 1,
data = data + data_extra,
linkopts = linkopts + _rpath_linkopts(name_os_full) + select({
clean_dep("//tensorflow:macos"): [
"-Wl,-install_name,@rpath/" + soname,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,-soname," + soname,
],
}),
visibility = visibility,
**kwargs
)
flat_names = [item for sublist in names for item in sublist]
if name not in flat_names:
native.filegroup(
name = name,
srcs = select({
"//tensorflow:windows": [":%s.dll" % (name)],
"//tensorflow:macos": [":lib%s%s.dylib" % (name, longsuffix)],
"//conditions:default": [":lib%s.so%s" % (name, longsuffix)],
}),
visibility = visibility,
)
register_extension_info(
extension_name = "tf_cc_shared_object",
label_regex_for_dep = "{extension_name}",
)
# Links in the framework shared object
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
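# An illustrative invocation (target and file names are hypothetical):
# tf_cc_binary(name = "example_tool",
#              srcs = ["example_tool.cc"],
#              deps = ["//tensorflow/core:framework"])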
def tf_cc_binary(
name,
srcs = [],
deps = [],
data = [],
linkopts = [],
copts = tf_copts(),
kernels = [],
per_os_targets = False, # Generate targets with SHARED_LIBRARY_NAME_PATTERNS
visibility = None,
**kwargs):
if kernels:
added_data_deps = tf_binary_dynamic_kernel_dsos()
else:
added_data_deps = []
if per_os_targets:
names = [pattern % (name, "") for pattern in SHARED_LIBRARY_NAME_PATTERNS]
else:
names = [name]
for name_os in names:
cc_binary(
name = name_os,
copts = copts,
srcs = srcs + tf_binary_additional_srcs(),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + if_mkl_ml(
[
clean_dep("//third_party/mkl:intel_binary_blob"),
],
) + if_static(
extra_deps = [],
otherwise = [
clean_dep("//tensorflow:libtensorflow_framework_import_lib"),
],
),
data = depset(data + added_data_deps),
linkopts = linkopts + _rpath_linkopts(name_os),
visibility = visibility,
**kwargs
)
if name not in names:
native.filegroup(
name = name,
srcs = select({
"//tensorflow:windows": [":%s.dll" % name],
"//tensorflow:macos": [":lib%s.dylib" % name],
"//conditions:default": [":lib%s.so" % name],
}),
visibility = visibility,
)
register_extension_info(
extension_name = "tf_cc_binary",
label_regex_for_dep = "{extension_name}.*",
)
# A simple wrap around native.cc_binary rule.
# When using this rule, you should realize it doesn't link to any tensorflow
# dependencies by default.
def tf_native_cc_binary(
name,
copts = tf_copts(),
linkopts = [],
**kwargs):
cc_binary(
name = name,
copts = copts,
linkopts = select({
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:macos"): [
"-lm",
],
"//conditions:default": [
"-lpthread",
"-lm",
],
}) + linkopts + _rpath_linkopts(name),
**kwargs
)
register_extension_info(
extension_name = "tf_native_cc_binary",
label_regex_for_dep = "{extension_name}.*",
)
def tf_gen_op_wrapper_cc(
name,
out_ops_file,
pkg = "",
op_gen = clean_dep("//tensorflow/cc:cc_op_gen_main"),
deps = None,
include_internal_ops = 0,
# ApiDefs will be loaded in the order specified in this list.
api_def_srcs = []):
# Construct an op generator binary for these ops.
tool = out_ops_file + "_gen_cc"
if deps == None:
deps = [pkg + ":" + name + "_op_lib"]
tf_cc_binary(
name = tool,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]),
linkstatic = 1, # Faster to link this one-time-use binary dynamically
deps = [op_gen] + deps,
)
srcs = api_def_srcs[:]
if not api_def_srcs:
api_def_args_str = ","
else:
api_def_args = []
for api_def_src in api_def_srcs:
# Add directory of the first ApiDef source to args.
# We are assuming all ApiDefs in a single api_def_src are in the
# same directory.
api_def_args.append(
" $$(dirname $$(echo $(locations " + api_def_src +
") | cut -d\" \" -f1))",
)
api_def_args_str = ",".join(api_def_args)
native.genrule(
name = name + "_genrule",
outs = [
out_ops_file + ".h",
out_ops_file + ".cc",
out_ops_file + "_internal.h",
out_ops_file + "_internal.cc",
],
srcs = srcs,
exec_tools = [":" + tool] + tf_binary_additional_srcs(),
cmd = ("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
"$(location :" + out_ops_file + ".cc) " +
str(include_internal_ops) + " " + api_def_args_str),
)
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate individual C++ .cc and .h
# files for each of the ops files mentioned, and then generate a
# single cc_library called "name" that combines all the
# generated C++ code.
#
# For example, for:
# tf_gen_op_wrappers_cc("tf_ops_lib", [ "array_ops", "math_ops" ])
#
#
# This will ultimately generate ops/* files and a library like:
#
# cc_library(name = "tf_ops_lib",
# srcs = [ "ops/array_ops.cc",
# "ops/math_ops.cc" ],
# hdrs = [ "ops/array_ops.h",
# "ops/math_ops.h" ],
# deps = [ ... ])
#
# Plus a private library for the "hidden" ops.
# cc_library(name = "tf_ops_lib_internal",
# srcs = [ "ops/array_ops_internal.cc",
# "ops/math_ops_internal.cc" ],
# hdrs = [ "ops/array_ops_internal.h",
# "ops/math_ops_internal.h" ],
# deps = [ ... ])
# TODO(joshl): Cleaner approach for hidden ops.
def tf_gen_op_wrappers_cc(
name,
op_lib_names = [],
other_srcs = [],
other_hdrs = [],
other_srcs_internal = [],
other_hdrs_internal = [],
pkg = "",
deps = [
clean_dep("//tensorflow/cc:ops"),
clean_dep("//tensorflow/cc:scope"),
clean_dep("//tensorflow/cc:const_op"),
],
deps_internal = [],
op_gen = clean_dep("//tensorflow/cc:cc_op_gen_main"),
include_internal_ops = 0,
visibility = None,
# ApiDefs will be loaded in the order specified in this list.
api_def_srcs = [],
# Any extra dependencies that the wrapper generator might need.
extra_gen_deps = []):
subsrcs = other_srcs[:]
subhdrs = other_hdrs[:]
internalsrcs = other_srcs_internal[:]
internalhdrs = other_hdrs_internal[:]
for n in op_lib_names:
tf_gen_op_wrapper_cc(
n,
"ops/" + n,
api_def_srcs = api_def_srcs,
include_internal_ops = include_internal_ops,
op_gen = op_gen,
pkg = pkg,
deps = [pkg + ":" + n + "_op_lib"] + extra_gen_deps,
)
subsrcs += ["ops/" + n + ".cc"]
subhdrs += ["ops/" + n + ".h"]
internalsrcs += ["ops/" + n + "_internal.cc"]
internalhdrs += ["ops/" + n + "_internal.h"]
cc_library(
name = name,
srcs = subsrcs,
hdrs = subhdrs,
deps = deps + if_not_android([
clean_dep("//tensorflow/core:core_cpu"),
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:lib"),
clean_dep("//tensorflow/core:ops"),
clean_dep("//tensorflow/core:protos_all_cc"),
]) + if_android([
clean_dep("//tensorflow/core:android_tensorflow_lib"),
]),
copts = tf_copts(),
alwayslink = 1,
visibility = visibility,
)
cc_library(
name = name + "_internal",
srcs = internalsrcs,
hdrs = internalhdrs,
deps = deps + deps_internal + if_not_android([
clean_dep("//tensorflow/core:core_cpu"),
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:lib"),
clean_dep("//tensorflow/core:ops"),
clean_dep("//tensorflow/core:protos_all_cc"),
]) + if_android([
clean_dep("//tensorflow/core:android_tensorflow_lib"),
]),
copts = tf_copts(),
alwayslink = 1,
visibility = [clean_dep("//tensorflow:internal")],
)
# Generates a Python library target wrapping the ops registered in "deps".
#
# Args:
# name: used as the name of the generated target and as a name component of
# the intermediate files.
# out: name of the python file created by this rule. If None, then
# "ops/gen_{name}.py" is used.
# hidden: Optional list of op names to make private in the Python module.
# It is invalid to specify both "hidden" and "op_whitelist".
# visibility: passed to py_library.
# deps: list of dependencies for the intermediate tool used to generate the
# python target. NOTE these `deps` are not applied to the final python
# library target itself.
# require_shape_functions: Unused. Leave this as False.
# hidden_file: optional file that contains a list of op names to make private
# in the generated Python module. Each op name should be on a line by
# itself. Lines that start with characters that are invalid op name
# starting characters are treated as comments and ignored.
# generated_target_name: name of the generated target (overrides the
# "name" arg)
# op_whitelist: if not empty, only op names in this list will be wrapped. It
# is invalid to specify both "hidden" and "op_whitelist".
# cc_linkopts: Optional linkopts to be added to tf_cc_binary that contains the
# specified ops.
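# For illustration (the op library name is hypothetical):
# tf_gen_op_wrapper_py(name = "array_ops",
#                      deps = ["//tensorflow/core:array_ops_op_lib"])
# is expected to generate "ops/gen_array_ops.py" and expose it as a py_library
# named "array_ops".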
def tf_gen_op_wrapper_py(
name,
out = None,
hidden = None,
visibility = None,
deps = [],
require_shape_functions = False,
hidden_file = None,
generated_target_name = None,
op_whitelist = [],
cc_linkopts = [],
api_def_srcs = []):
_ = require_shape_functions # Unused.
if (hidden or hidden_file) and op_whitelist:
fail("Cannot pass specify both hidden and op_whitelist.")
# Construct a cc_binary containing the specified ops.
tool_name = "gen_" + name + "_py_wrappers_cc"
if not deps:
deps = [str(Label("//tensorflow/core:" + name + "_op_lib"))]
tf_cc_binary(
name = tool_name,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]) + cc_linkopts,
linkstatic = 1, # Faster to link this one-time-use binary dynamically
visibility = [clean_dep("//tensorflow:internal")],
deps = ([
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/python:python_op_gen_main"),
] + deps),
)
# Invoke the previous cc_binary to generate a python file.
if not out:
out = "ops/gen_" + name + ".py"
if hidden:
op_list_arg = ",".join(hidden)
op_list_is_whitelist = False
elif op_whitelist:
op_list_arg = ",".join(op_whitelist)
op_list_is_whitelist = True
else:
op_list_arg = "''"
op_list_is_whitelist = False
# Prepare ApiDef directories to pass to the genrule.
if not api_def_srcs:
api_def_args_str = ","
else:
api_def_args = []
for api_def_src in api_def_srcs:
# Add directory of the first ApiDef source to args.
# We are assuming all ApiDefs in a single api_def_src are in the
# same directory.
api_def_args.append(
"$$(dirname $$(echo $(locations " + api_def_src +
") | cut -d\" \" -f1))",
)
api_def_args_str = ",".join(api_def_args)
if hidden_file:
# `hidden_file` is file containing a list of op names to be hidden in the
# generated module.
native.genrule(
name = name + "_pygenrule",
outs = [out],
srcs = api_def_srcs + [hidden_file],
exec_tools = [tool_name] + tf_binary_additional_srcs(),
cmd = ("$(location " + tool_name + ") " + api_def_args_str +
" @$(location " + hidden_file + ") > $@"),
)
else:
native.genrule(
name = name + "_pygenrule",
outs = [out],
srcs = api_def_srcs,
exec_tools = [tool_name] + tf_binary_additional_srcs(),
cmd = ("$(location " + tool_name + ") " + api_def_args_str + " " +
op_list_arg + " " +
("1" if op_list_is_whitelist else "0") + " > $@"),
)
# Make a py_library out of the generated python file.
if not generated_target_name:
generated_target_name = name
native.py_library(
name = generated_target_name,
srcs = [out],
srcs_version = "PY2AND3",
visibility = visibility,
deps = [
clean_dep("//tensorflow/python:framework_for_generated_wrappers_v2"),
],
# Instruct build_cleaner to try to avoid using this rule; typically ops
# creators will provide their own tf_custom_op_py_library based target
# that wraps this one.
tags = ["avoid_dep"],
)
# Define a bazel macro that creates cc_test for tensorflow.
#
# Links in the framework shared object
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
#
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Needs more investigation.
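#
# An illustrative invocation (names are hypothetical):
# tf_cc_test(name = "example_test",
#            srcs = ["example_test.cc"],
#            deps = ["//tensorflow/core:test_main"])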
def tf_cc_test(
name,
srcs,
deps,
data = [],
linkstatic = 0,
extra_copts = [],
suffix = "",
linkopts = [],
kernels = [],
**kwargs):
cc_test(
name = "%s%s" % (name, suffix),
srcs = srcs + tf_binary_additional_srcs(),
copts = tf_copts() + extra_copts,
linkopts = select({
clean_dep("//tensorflow:android"): [
"-pie",
],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:macos"): [
"-lm",
],
"//conditions:default": [
"-lpthread",
"-lm",
],
}) + linkopts + _rpath_linkopts(name),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + if_mkl_ml(
[
clean_dep("//third_party/mkl:intel_binary_blob"),
],
),
data = data +
tf_binary_dynamic_kernel_dsos() +
tf_binary_additional_srcs(),
exec_properties = tf_exec_properties(kwargs),
# Nested select() statements seem not to be supported when passed to
# linkstatic, and we already have a cuda select() passed in to this
# function.
linkstatic = linkstatic or select({
# cc_tests with ".so"s in srcs incorrectly link on Darwin unless
# linkstatic=1 (https://github.com/bazelbuild/bazel/issues/3450).
# TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
clean_dep("//tensorflow:macos"): 1,
"//conditions:default": 0,
}),
**kwargs
)
register_extension_info(
extension_name = "tf_cc_test",
label_regex_for_dep = "{extension_name}.*",
)
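# Illustrative only: a minimal BUILD-file invocation of tf_cc_test. The target
# and dependency labels below are placeholders, not verified TensorFlow targets.
#
#   tf_cc_test(
#       name = "example_util_test",
#       srcs = ["example_util_test.cc"],
#       deps = [
#           "//tensorflow/core:test",
#           "//tensorflow/core:test_main",
#       ],
#   )
#
# This expands to a cc_test linked against the framework shared object, with the
# platform-specific linkopts and rpaths applied as defined above.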
# Part of the testing workflow requires a distinguishable name for the build
# rules that involve a GPU, even if otherwise identical to the base rule.
def tf_cc_test_gpu(
name,
srcs,
deps,
linkstatic = 0,
tags = [],
data = [],
size = "medium",
suffix = "",
args = None):
tf_cc_test(
name,
srcs,
deps,
size = size,
args = args,
data = data,
linkstatic = linkstatic,
suffix = suffix,
tags = tags,
)
register_extension_info(
extension_name = "tf_cc_test_gpu",
label_regex_for_dep = "{extension_name}",
)
def tf_gpu_cc_test(
name,
srcs = [],
deps = [],
tags = [],
data = [],
size = "medium",
extra_copts = [],
linkstatic = 0,
args = [],
kernels = [],
linkopts = []):
tf_cc_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
extra_copts = extra_copts,
kernels = kernels,
linkopts = linkopts,
linkstatic = linkstatic,
tags = tags + ["manual"],
deps = deps,
)
tf_cc_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
extra_copts = extra_copts,
kernels = kernels,
linkopts = linkopts,
linkstatic = select({
# TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
clean_dep("//tensorflow:macos"): 1,
"@local_config_cuda//cuda:using_nvcc": 1,
"@local_config_cuda//cuda:using_clang": 1,
"//conditions:default": 0,
}),
suffix = "_gpu",
tags = tags + tf_gpu_tests_tags(),
deps = deps + if_cuda_is_configured([
clean_dep("//tensorflow/core:gpu_runtime"),
]) + if_rocm_is_configured([
clean_dep("//tensorflow/core:gpu_runtime"),
]),
)
register_extension_info(
extension_name = "tf_gpu_cc_test",
label_regex_for_dep = "{extension_name}",
)
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_cc_test(*args, **kwargs):
tf_gpu_cc_test(*args, **kwargs)
register_extension_info(
extension_name = "tf_cuda_cc_test",
label_regex_for_dep = "{extension_name}",
)
def tf_gpu_only_cc_test(
name,
srcs = [],
deps = [],
tags = [],
data = [],
size = "medium",
linkstatic = 0,
args = [],
kernels = [],
linkopts = []):
tags = tags + tf_gpu_tests_tags()
gpu_lib_name = "%s%s" % (name, "_gpu_lib")
tf_gpu_kernel_library(
name = gpu_lib_name,
srcs = srcs + tf_binary_additional_srcs(),
deps = deps,
testonly = 1,
)
cc_test(
name = "%s%s" % (name, "_gpu"),
size = size,
args = args,
features = if_cuda(["-use_header_modules"]),
data = data + tf_binary_dynamic_kernel_dsos(),
deps = [":" + gpu_lib_name],
linkopts = if_not_windows(["-lpthread", "-lm"]) + linkopts + _rpath_linkopts(name),
linkstatic = linkstatic or select({
# cc_tests with ".so"s in srcs incorrectly link on Darwin
# unless linkstatic=1.
# TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
clean_dep("//tensorflow:macos"): 1,
"//conditions:default": 0,
}),
tags = tags,
exec_properties = tf_exec_properties({"tags": tags}),
)
register_extension_info(
extension_name = "tf_gpu_only_cc_test",
label_regex_for_dep = "{extension_name}_gpu",
)
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_only_cc_test(*args, **kwargs):
tf_gpu_only_cc_test(*args, **kwargs)
register_extension_info(
extension_name = "tf_cuda_only_cc_test",
label_regex_for_dep = "{extension_name}_gpu",
)
# Create a cc_test for each of the tensorflow tests listed in "tests", along
# with a test suite of the given name, if provided.
def tf_cc_tests(
srcs,
deps,
name = "",
linkstatic = 0,
tags = [],
size = "medium",
args = None,
linkopts = [],
kernels = [],
create_named_test_suite = False,
visibility = None):
test_names = []
for src in srcs:
test_name = src_to_test_name(src)
tf_cc_test(
name = test_name,
size = size,
srcs = [src],
args = args,
kernels = kernels,
linkopts = linkopts,
linkstatic = linkstatic,
tags = tags,
deps = deps,
visibility = visibility,
)
test_names.append(test_name)
    # Add a test suite with the generated tests if a name was provided and
    # it does not conflict with any of the test names.
if create_named_test_suite:
native.test_suite(
name = name,
tests = test_names,
visibility = visibility,
)
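# Illustrative only: tf_cc_tests creates one cc_test per source file. A
# hypothetical BUILD snippet (file and dependency names are placeholders):
#
#   tf_cc_tests(
#       name = "example_tests",
#       srcs = ["foo_test.cc", "bar_test.cc"],
#       deps = ["//tensorflow/core:test_main"],
#       create_named_test_suite = True,
#   )
#
# would define tests "foo_test" and "bar_test" plus a "example_tests" test_suite.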
def tf_cc_test_mkl(
srcs,
deps,
name = "",
data = [],
linkstatic = 0,
tags = [],
size = "medium",
kernels = [],
args = None):
# -fno-exceptions in nocopts breaks compilation if header modules are enabled.
disable_header_modules = ["-use_header_modules"]
for src in srcs:
cc_test(
name = src_to_test_name(src),
srcs = if_mkl([src]) + tf_binary_additional_srcs(),
copts = tf_copts(allow_exceptions = True) + tf_openmp_copts(),
linkopts = select({
clean_dep("//tensorflow:android"): [
"-pie",
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-lpthread",
"-lm",
],
}) + _rpath_linkopts(src_to_test_name(src)),
deps = deps + tf_binary_dynamic_kernel_deps(kernels) + mkl_deps(),
data = data + tf_binary_dynamic_kernel_dsos(),
exec_properties = tf_exec_properties({"tags": tags}),
linkstatic = linkstatic,
tags = tags,
size = size,
args = args,
features = disable_header_modules,
)
def tf_cc_tests_gpu(
srcs,
deps,
name = "",
linkstatic = 0,
tags = [],
size = "medium",
kernels = [],
args = None):
    tf_cc_tests(srcs, deps, name = name, linkstatic = linkstatic, size = size, args = args, kernels = kernels, tags = tags)
def tf_gpu_cc_tests(
srcs,
deps,
name = "",
tags = [],
size = "medium",
linkstatic = 0,
args = None,
kernels = [],
linkopts = []):
for src in srcs:
tf_gpu_cc_test(
name = src_to_test_name(src),
size = size,
srcs = [src],
args = args,
kernels = kernels,
linkopts = linkopts,
linkstatic = linkstatic,
tags = tags,
deps = deps,
)
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_cc_tests(*args, **kwargs):
tf_gpu_cc_tests(*args, **kwargs)
def tf_java_test(
name,
srcs = [],
deps = [],
kernels = [],
*args,
**kwargs):
native.java_test(
name = name,
srcs = srcs,
deps = deps + tf_binary_additional_srcs(fullversion = True) + tf_binary_dynamic_kernel_dsos() + tf_binary_dynamic_kernel_deps(kernels),
*args,
**kwargs
)
register_extension_info(
extension_name = "tf_java_test",
label_regex_for_dep = "{extension_name}",
)
def _cuda_copts(opts = []):
"""Gets the appropriate set of copts for (maybe) CUDA compilation.
If we're doing CUDA compilation, returns copts for our particular CUDA
compiler. If we're not doing CUDA compilation, returns an empty list.
"""
return select({
"//conditions:default": [],
"@local_config_cuda//cuda:using_nvcc": ([
"-nvcc_options=relaxed-constexpr",
"-nvcc_options=ftz=true",
]),
"@local_config_cuda//cuda:using_clang": ([
"-fcuda-flush-denormals-to-zero",
]),
}) + if_cuda_is_configured_compat(opts)
# Build defs for TensorFlow kernels
# When this target is built using --config=cuda, a cc_library is built
# that passes -DGOOGLE_CUDA=1 and '-x cuda', linking in additional
# libraries needed by GPU kernels.
#
# When this target is built using --config=rocm, a cc_library is built
# that passes -DTENSORFLOW_USE_ROCM and '-x rocm', linking in additional
# libraries needed by GPU kernels.
def tf_gpu_kernel_library(
srcs,
copts = [],
cuda_copts = [],
deps = [],
hdrs = [],
**kwargs):
copts = copts + tf_copts() + _cuda_copts(opts = cuda_copts) + rocm_copts(opts = cuda_copts)
kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
cuda_library(
srcs = srcs,
hdrs = hdrs,
copts = copts,
deps = deps + if_cuda_is_configured_compat([
clean_dep("//tensorflow/stream_executor/cuda:cudart_stub"),
clean_dep("//tensorflow/core:gpu_lib"),
]) + if_rocm_is_configured([
clean_dep("//tensorflow/core:gpu_lib"),
]),
alwayslink = 1,
**kwargs
)
register_extension_info(
extension_name = "tf_gpu_kernel_library",
label_regex_for_dep = "{extension_name}",
)
def tf_gpu_library(deps = None, cuda_deps = None, copts = tf_copts(), **kwargs):
"""Generate a cc_library with a conditional set of CUDA dependencies.
When the library is built with --config=cuda:
- Both deps and cuda_deps are used as dependencies.
- The cuda runtime is added as a dependency (if necessary).
- The library additionally passes -DGOOGLE_CUDA=1 to the list of copts.
- In addition, when the library is also built with TensorRT enabled, it
additionally passes -DGOOGLE_TENSORRT=1 to the list of copts.
Args:
- cuda_deps: BUILD dependencies which will be linked if and only if:
'--config=cuda' is passed to the bazel command line.
- deps: dependencies which will always be linked.
- copts: copts always passed to the cc_library.
- kwargs: Any other argument to cc_library.
"""
if not deps:
deps = []
if not cuda_deps:
cuda_deps = []
kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
cc_library(
deps = deps + if_cuda_is_configured_compat(cuda_deps + [
clean_dep("//tensorflow/stream_executor/cuda:cudart_stub"),
"@local_config_cuda//cuda:cuda_headers",
]) + if_rocm_is_configured(cuda_deps + [
"@local_config_rocm//rocm:rocm_headers",
]),
copts = (copts + if_cuda(["-DGOOGLE_CUDA=1"]) + if_rocm(["-DTENSORFLOW_USE_ROCM=1"]) + if_xla_available(["-DTENSORFLOW_USE_XLA=1"]) + if_mkl(["-DINTEL_MKL=1"]) + if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"]) + if_enable_mkl(["-DENABLE_MKL"]) + if_tensorrt(["-DGOOGLE_TENSORRT=1"])),
**kwargs
)
register_extension_info(
extension_name = "tf_gpu_library",
label_regex_for_dep = "{extension_name}",
)
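# Illustrative only: a sketch of how tf_gpu_library is typically invoked from a
# BUILD file (target and dependency names below are hypothetical):
#
#   tf_gpu_library(
#       name = "example_gpu_helpers",
#       srcs = ["example_gpu_helpers.cc"],
#       hdrs = ["example_gpu_helpers.h"],
#       deps = ["//tensorflow/core:framework"],
#       cuda_deps = ["//tensorflow/core:gpu_lib"],
#   )
#
# With --config=cuda, both deps and cuda_deps are linked and -DGOOGLE_CUDA=1 is
# added to copts; without CUDA or ROCm configured, cuda_deps are not linked.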
# terminology changes: saving tf_cuda_* definition for compatibility
def tf_cuda_library(*args, **kwargs):
tf_gpu_library(*args, **kwargs)
register_extension_info(
extension_name = "tf_cuda_library",
label_regex_for_dep = "{extension_name}",
)
def tf_kernel_library(
name,
prefix = None,
srcs = None,
gpu_srcs = None,
hdrs = None,
deps = None,
alwayslink = 1,
copts = None,
gpu_copts = None,
is_external = False,
**kwargs):
"""A rule to build a TensorFlow OpKernel.
May either specify srcs/hdrs or prefix. Similar to tf_gpu_library,
but with alwayslink=1 by default. If prefix is specified:
* prefix*.cc (except *.cu.cc) is added to srcs
* prefix*.h (except *.cu.h) is added to hdrs
* prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
With the exception that test files are excluded.
For example, with prefix = "cast_op",
* srcs = ["cast_op.cc"]
* hdrs = ["cast_op.h"]
* gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
* "cast_op_test.cc" is excluded
With prefix = "cwise_op"
* srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
* hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
* gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
"cwise_ops.h", "cwise_ops_common.h",
"cwise_ops_gpu_common.cu.h"]
* "cwise_ops_test.cc" is excluded
"""
if not srcs:
srcs = []
if not hdrs:
hdrs = []
if not deps:
deps = []
if not copts:
copts = []
if not gpu_copts:
gpu_copts = []
textual_hdrs = []
copts = copts + tf_copts(is_external = is_external)
# Override EIGEN_STRONG_INLINE to inline when
# --define=override_eigen_strong_inline=true to avoid long compiling time.
# See https://github.com/tensorflow/tensorflow/issues/10521
copts = copts + if_override_eigen_strong_inline(["/DEIGEN_STRONG_INLINE=inline"])
if prefix:
if native.glob([prefix + "*.cu.cc"], exclude = ["*test*"]):
if not gpu_srcs:
gpu_srcs = []
gpu_srcs = gpu_srcs + native.glob(
[prefix + "*.cu.cc", prefix + "*.h"],
exclude = [prefix + "*test*"],
)
srcs = srcs + native.glob(
[prefix + "*.cc"],
exclude = [prefix + "*test*", prefix + "*.cu.cc"],
)
hdrs = hdrs + native.glob(
[prefix + "*.h"],
exclude = [prefix + "*test*", prefix + "*.cu.h", prefix + "*impl.h"],
)
textual_hdrs = native.glob(
[prefix + "*impl.h"],
exclude = [prefix + "*test*", prefix + "*.cu.h"],
)
cuda_deps = [clean_dep("//tensorflow/core:gpu_lib")]
if gpu_srcs:
for gpu_src in gpu_srcs:
if gpu_src.endswith(".cc") and not gpu_src.endswith(".cu.cc"):
fail("{} not allowed in gpu_srcs. .cc sources must end with .cu.cc"
.format(gpu_src))
tf_gpu_kernel_library(
name = name + "_gpu",
srcs = gpu_srcs,
deps = deps,
copts = gpu_copts,
**kwargs
)
cuda_deps.extend([":" + name + "_gpu"])
kwargs["tags"] = kwargs.get("tags", []) + [
"req_dep=%s" % clean_dep("//tensorflow/core:gpu_lib"),
"req_dep=@local_config_cuda//cuda:cuda_headers",
]
tf_gpu_library(
name = name,
srcs = srcs,
hdrs = hdrs,
textual_hdrs = textual_hdrs,
copts = copts,
cuda_deps = cuda_deps,
linkstatic = 1, # Needed since alwayslink is broken in bazel b/27630669
alwayslink = alwayslink,
deps = deps,
**kwargs
)
# TODO(gunan): CUDA dependency not clear here. Fix it.
tf_cc_shared_object(
name = "libtfkernel_%s.so" % name,
srcs = srcs + hdrs,
copts = copts,
tags = ["manual", "notap"],
deps = deps,
)
register_extension_info(
extension_name = "tf_kernel_library",
label_regex_for_dep = "({extension_name}(_gpu)?|libtfkernel_{extension_name}\\.so)",
)
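# Illustrative only: the prefix-based form of tf_kernel_library described in the
# docstring above, as it might appear in a kernels BUILD file (the "cast_op"
# sources are the documented example; the dep list here is a placeholder):
#
#   tf_kernel_library(
#       name = "cast_op",
#       prefix = "cast_op",
#       deps = ["//tensorflow/core:framework"],
#   )
#
# This globs cast_op*.cc/.h into srcs/hdrs (excluding tests), builds a
# "cast_op_gpu" cuda_library from any *.cu.cc sources, and also emits
# libtfkernel_cast_op.so via tf_cc_shared_object.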
def tf_mkl_kernel_library(
name,
prefix = None,
srcs = None,
hdrs = None,
deps = None,
alwayslink = 1,
copts = tf_copts(allow_exceptions = True) + tf_openmp_copts()):
"""A rule to build MKL-based TensorFlow kernel libraries."""
if not bool(srcs):
srcs = []
if not bool(hdrs):
hdrs = []
if prefix:
srcs = srcs + native.glob(
[prefix + "*.cc"],
exclude = [prefix + "*test*"],
)
hdrs = hdrs + native.glob(
[prefix + "*.h"],
exclude = [prefix + "*test*"],
)
# -fno-exceptions in nocopts breaks compilation if header modules are enabled.
disable_header_modules = ["-use_header_modules"]
cc_library(
name = name,
srcs = if_mkl(srcs),
hdrs = hdrs,
deps = deps,
alwayslink = alwayslink,
copts = copts,
features = disable_header_modules,
)
register_extension_info(
extension_name = "tf_mkl_kernel_library",
label_regex_for_dep = "{extension_name}",
)
def _get_transitive_headers(hdrs, deps):
"""Obtain the header files for a target and its transitive dependencies.
Args:
hdrs: a list of header files
deps: a list of targets that are direct dependencies
Returns:
a collection of the transitive headers
"""
return depset(
hdrs,
transitive = [dep[CcInfo].compilation_context.headers for dep in deps],
)
# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx):
srcs = ctx.files.srcs
if len(srcs) != 1:
fail("Exactly one SWIG source file label must be specified.", "srcs")
module_name = ctx.attr.module_name
src = ctx.files.srcs[0]
inputs = _get_transitive_headers([src] + ctx.files.swig_includes, ctx.attr.deps)
inputs = depset(ctx.files._swiglib, transitive = [inputs])
inputs = depset(ctx.files.toolchain_deps, transitive = [inputs])
swig_include_dirs = depset(_get_repository_roots(ctx, inputs))
swig_include_dirs = depset(sorted([f.dirname for f in ctx.files._swiglib]), transitive = [swig_include_dirs])
args = [
"-c++",
"-python",
"-module",
module_name,
"-o",
ctx.outputs.cc_out.path,
"-outdir",
ctx.outputs.py_out.dirname,
]
args += ["-l" + f.path for f in ctx.files.swig_includes]
args += ["-I" + i for i in swig_include_dirs.to_list()]
args.append(src.path)
outputs = [ctx.outputs.cc_out, ctx.outputs.py_out]
ctx.actions.run(
executable = ctx.executable._swig,
arguments = args,
inputs = inputs,
outputs = outputs,
mnemonic = "PythonSwig",
progress_message = "SWIGing " + src.path,
)
return struct(files = depset(outputs))
_py_wrap_cc = rule(
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
),
"swig_includes": attr.label_list(
allow_files = True,
),
"deps": attr.label_list(
allow_files = True,
providers = [CcInfo],
),
"toolchain_deps": attr.label_list(
allow_files = True,
),
"module_name": attr.string(mandatory = True),
"py_module_name": attr.string(mandatory = True),
"_swig": attr.label(
default = Label("@swig//:swig"),
executable = True,
cfg = "host",
),
"_swiglib": attr.label(
default = Label("@swig//:templates"),
allow_files = True,
),
},
outputs = {
"cc_out": "%{module_name}.cc",
"py_out": "%{py_module_name}.py",
},
implementation = _py_wrap_cc_impl,
)
def _get_repository_roots(ctx, files):
"""Returns abnormal root directories under which files reside.
When running a ctx.action, source files within the main repository are all
relative to the current directory; however, files that are generated or exist
in remote repositories will have their root directory be a subdirectory,
e.g. bazel-out/local-fastbuild/genfiles/external/jpeg_archive. This function
returns the set of these devious directories, ranked and sorted by popularity
in order to hopefully minimize the number of I/O system calls within the
compiler, because includes have quadratic complexity.
"""
result = {}
for f in files.to_list():
root = f.root.path
if root:
if root not in result:
result[root] = 0
result[root] -= 1
work = f.owner.workspace_root
if work:
if root:
root += "/"
root += work
if root:
if root not in result:
result[root] = 0
result[root] -= 1
return [k for v, k in sorted([(v, k) for k, v in result.items()])]
# Bazel rule for collecting the header files that a target depends on.
def _transitive_hdrs_impl(ctx):
outputs = _get_transitive_headers([], ctx.attr.deps)
return struct(files = outputs)
_transitive_hdrs = rule(
attrs = {
"deps": attr.label_list(
allow_files = True,
providers = [CcInfo],
),
},
implementation = _transitive_hdrs_impl,
)
def transitive_hdrs(name, deps = [], **kwargs):
_transitive_hdrs(name = name + "_gather", deps = deps)
native.filegroup(name = name, srcs = [":" + name + "_gather"])
# Create a header only library that includes all the headers exported by
# the libraries in deps.
#
# **NOTE**: The headers brought in are **NOT** fully transitive; certain
# deep headers may be missing. Furthermore, the `includes` argument of
# cc_libraries in the dependencies are *not* going to be respected
# when you use cc_header_only_library. Some cases where this creates
# problems include: Eigen, grpc, MLIR. In cases such as these, you must
# find a header-only version of the cc_library rule you care about and
# link it *directly* in addition to your use of the cc_header_only_library
# intermediary.
#
# For:
# * Eigen: it's a header-only library. Add it directly to your deps.
# * GRPC: add a direct dep on @com_github_grpc_grpc//:grpc++_public_hdrs.
#
def cc_header_only_library(name, deps = [], includes = [], extra_deps = [], **kwargs):
_transitive_hdrs(name = name + "_gather", deps = deps)
cc_library(
name = name,
hdrs = [":" + name + "_gather"],
includes = includes,
deps = extra_deps,
**kwargs
)
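# Illustrative only: a hypothetical use of cc_header_only_library, keeping in
# mind the caveats above about non-transitive headers (names are placeholders):
#
#   cc_header_only_library(
#       name = "example_headers",
#       deps = ["//tensorflow/core:framework"],
#   )
#
# Consumers that also need deep headers (e.g. Eigen) must still add those
# header-only targets to their own deps directly, as noted above.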
def tf_custom_op_library_additional_deps():
return [
"@com_google_protobuf//:protobuf_headers",
clean_dep("//third_party/eigen3"),
clean_dep("//tensorflow/core:framework_headers_lib"),
] + if_windows([clean_dep("//tensorflow/python:pywrap_tensorflow_import_lib")])
# A list of targets that contains the implementation of
# tf_custom_op_library_additional_deps. It's used to generate a DEF file for
# exporting symbols from _pywrap_tensorflow.dll on Windows.
def tf_custom_op_library_additional_deps_impl():
return [
"@com_google_protobuf//:protobuf",
"@nsync//:nsync_cpp",
# for //third_party/eigen3
clean_dep("//third_party/eigen3"),
# for //tensorflow/core:framework_headers_lib
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:reader_base"),
]
# Traverse the dependency graph along the "deps" attribute of the
# target and return a struct with one field called 'tf_collected_deps'.
# tf_collected_deps will be the union of the deps of the current target
# and the tf_collected_deps of the dependencies of this target.
def _collect_deps_aspect_impl(target, ctx):
alldeps = depset()
if hasattr(ctx.rule.attr, "deps"):
for dep in ctx.rule.attr.deps:
alldeps = depset([dep.label], transitive = [alldeps])
if hasattr(dep, "tf_collected_deps"):
alldeps = depset(transitive = [alldeps, dep.tf_collected_deps])
return struct(tf_collected_deps = alldeps)
collect_deps_aspect = aspect(
attr_aspects = ["deps"],
implementation = _collect_deps_aspect_impl,
)
def _dep_label(dep):
label = dep.label
return label.package + ":" + label.name
# This rule checks that the transitive dependencies of targets listed
# in the 'deps' attribute don't depend on the targets listed in
# the 'disallowed_deps' attribute.
def _check_deps_impl(ctx):
disallowed_deps = ctx.attr.disallowed_deps
for input_dep in ctx.attr.deps:
if not hasattr(input_dep, "tf_collected_deps"):
continue
for dep in input_dep.tf_collected_deps.to_list():
for disallowed_dep in disallowed_deps:
if dep == disallowed_dep.label:
fail(
_dep_label(input_dep) + " cannot depend on " + _dep_label(
disallowed_dep,
),
)
return struct()
check_deps = rule(
_check_deps_impl,
attrs = {
"deps": attr.label_list(
aspects = [collect_deps_aspect],
mandatory = True,
allow_files = True,
),
"disallowed_deps": attr.label_list(
mandatory = True,
allow_files = True,
),
},
)
def tf_custom_op_library(name, srcs = [], gpu_srcs = [], deps = [], linkopts = [], copts = [], **kwargs):
"""Helper to build a dynamic library (.so) from the sources containing implementations of custom ops and kernels.
"""
cuda_deps = [
clean_dep("//tensorflow/core:stream_executor_headers_lib"),
"@local_config_cuda//cuda:cuda_headers",
"@local_config_cuda//cuda:cudart_static",
]
rocm_deps = [
clean_dep("//tensorflow/core:stream_executor_headers_lib"),
]
deps = deps + tf_custom_op_library_additional_deps()
# Override EIGEN_STRONG_INLINE to inline when
# --define=override_eigen_strong_inline=true to avoid long compiling time.
# See https://github.com/tensorflow/tensorflow/issues/10521
copts = copts + if_override_eigen_strong_inline(["/DEIGEN_STRONG_INLINE=inline"])
if gpu_srcs:
basename = name.split(".")[0]
cuda_library(
name = basename + "_gpu",
srcs = gpu_srcs,
copts = copts + tf_copts() + _cuda_copts() + rocm_copts() +
if_tensorrt(["-DGOOGLE_TENSORRT=1"]),
deps = deps + if_cuda_is_configured_compat(cuda_deps) + if_rocm_is_configured(rocm_deps),
**kwargs
)
cuda_deps.extend([":" + basename + "_gpu"])
rocm_deps.extend([":" + basename + "_gpu"])
check_deps(
name = name + "_check_deps",
disallowed_deps = [
clean_dep("//tensorflow/core:framework"),
clean_dep("//tensorflow/core:lib"),
],
deps = deps + if_cuda_is_configured_compat(cuda_deps) + if_rocm_is_configured(rocm_deps),
)
tf_cc_shared_object(
name = name,
srcs = srcs,
deps = deps + if_cuda_is_configured_compat(cuda_deps) + if_rocm_is_configured(rocm_deps),
data = if_static([name + "_check_deps"]),
copts = copts + tf_copts(is_external = True),
features = ["windows_export_all_symbols"],
linkopts = linkopts + select({
"//conditions:default": [
"-lm",
],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:macos"): [],
}),
**kwargs
)
register_extension_info(
extension_name = "tf_custom_op_library",
label_regex_for_dep = "{extension_name}",
)
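# Illustrative only: a typical custom-op BUILD entry using tf_custom_op_library
# (target and file names below are hypothetical):
#
#   tf_custom_op_library(
#       name = "zero_out.so",
#       srcs = ["zero_out_op.cc", "zero_out_kernel.cc"],
#       gpu_srcs = ["zero_out_kernel.cu.cc"],
#   )
#
# The check_deps rule generated alongside it rejects accidental dependencies on
# //tensorflow/core:framework or //tensorflow/core:lib, which must not be linked
# into a custom-op .so.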
# Placeholder to use until bazel supports py_strict_library.
def py_strict_library(name, **kwargs):
native.py_library(name = name, **kwargs)
def tf_custom_op_py_library(
name,
srcs = [],
dso = [],
kernels = [],
srcs_version = "PY2AND3",
visibility = None,
deps = []):
_ignore = [kernels]
native.py_library(
name = name,
data = dso,
srcs = srcs,
srcs_version = srcs_version,
visibility = visibility,
deps = deps,
)
register_extension_info(
extension_name = "tf_custom_op_py_library",
label_regex_for_dep = "{extension_name}",
)
# In libraries generated by tf_py_wrap_cc, module init functions are not
# exported unless they contain one of the keywords in the version file, which
# prevents custom python modules from being importable.
# The rule below appends the init_<module_name> and PyInit_* symbols to the
# list of exported functions in the version script.
def _append_init_to_versionscript_impl(ctx):
mod_name = ctx.attr.module_name
if ctx.attr.is_version_script:
ctx.actions.expand_template(
template = ctx.file.template_file,
output = ctx.outputs.versionscript,
substitutions = {
"global:": "global:\n init_%s;\n _init_%s;\n PyInit_*;\n _PyInit_*;" % (mod_name, mod_name),
},
is_executable = False,
)
else:
ctx.actions.expand_template(
template = ctx.file.template_file,
output = ctx.outputs.versionscript,
substitutions = {
"*tensorflow*": "*tensorflow*\ninit_%s\n_init_%s\nPyInit_*\n_PyInit_*\n" % (mod_name, mod_name),
},
is_executable = False,
)
_append_init_to_versionscript = rule(
attrs = {
"module_name": attr.string(mandatory = True),
"template_file": attr.label(
allow_single_file = True,
mandatory = True,
),
"is_version_script": attr.bool(
default = True,
doc = "whether target is a ld version script or exported symbol list",
mandatory = False,
),
},
outputs = {"versionscript": "%{name}.lds"},
implementation = _append_init_to_versionscript_impl,
)
def tf_py_wrap_cc(
name,
srcs = [],
swig_includes = [],
deps = [],
copts = [],
version_script = None,
**kwargs):
"""Builds a Python extension module."""
module_name = name.split("/")[-1]
# Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
# and use that as the name for the rule producing the .so file.
cc_library_base = "/".join(name.split("/")[:-1] + ["_" + module_name])
# TODO(b/137885063): tf_cc_shared_object needs to be cleaned up; we really
# shouldn't be passing a name qualified with .so here.
cc_library_name = cc_library_base + ".so"
cc_library_pyd_name = "/".join(
name.split("/")[:-1] + ["_" + module_name + ".pyd"],
)
extra_deps = []
# TODO(amitpatankar): Migrate from py_wrap_cc to cc_shared_library.
# TensorFlow python does not use any SWIG sources so we create
# an empty SWIG file. This rule cannot be cleaned up until bazel shared
# library support lands.
if srcs == []:
srcs = ["default.swig"]
native.genrule(
name = "default_swig_rule",
outs = srcs,
cmd = "touch $@",
)
_py_wrap_cc(
name = name + "_py_wrap",
srcs = srcs,
module_name = module_name,
py_module_name = name,
swig_includes = swig_includes,
toolchain_deps = ["@bazel_tools//tools/cpp:current_cc_toolchain"],
deps = deps + extra_deps,
)
if not version_script:
version_script = select({
"@local_config_cuda//cuda:darwin": clean_dep("//tensorflow:tf_exported_symbols.lds"),
"//conditions:default": clean_dep("//tensorflow:tf_version_script.lds"),
})
vscriptname = name + "_versionscript"
_append_init_to_versionscript(
name = vscriptname,
is_version_script = select({
"@local_config_cuda//cuda:darwin": False,
"//conditions:default": True,
}),
module_name = module_name,
template_file = version_script,
)
extra_linkopts = select({
"@local_config_cuda//cuda:darwin": [
"-Wl,-exported_symbols_list,$(location %s.lds)" % vscriptname,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,--version-script",
"$(location %s.lds)" % vscriptname,
],
})
extra_deps += select({
"@local_config_cuda//cuda:darwin": [
"%s.lds" % vscriptname,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"%s.lds" % vscriptname,
],
})
# Due to b/149224972 we have to add libtensorflow_framework.so
# as a dependency so the linker doesn't try and optimize and
# remove it from pywrap_tensorflow_internal.so
# Issue: https://github.com/tensorflow/tensorflow/issues/34117
# Fix: https://github.com/tensorflow/tensorflow/commit/5caa9e83798cb510c9b49acee8a64efdb746207c
extra_deps += if_static(
extra_deps = [],
otherwise = [
clean_dep("//tensorflow:libtensorflow_framework_import_lib"),
],
)
tf_cc_shared_object(
name = cc_library_name,
srcs = [module_name + ".cc"],
# framework_so is no longer needed as libtf.so is included via the extra_deps.
framework_so = [],
copts = copts + if_not_windows([
"-Wno-self-assign",
"-Wno-sign-compare",
"-Wno-write-strings",
]),
linkopts = extra_linkopts,
linkstatic = 1,
deps = deps + extra_deps,
**kwargs
)
# When a non-versioned .so is added as a 'src' to a bazel target, it uses
# -l%(so_name) instead of -l:%(so_file) during linking. When -l%(so_name)
# is passed to ld, it will look for an associated file with the schema
# lib%(so_name).so. Since pywrap_tensorflow is not explicitly versioned
# and is not prefixed with lib_, we add a rule for the creation of an .so
# file with the canonical lib schema (e.g. libNAME.so), so that
# -l%(so_name) is resolved during linking.
#
# See: https://github.com/bazelbuild/bazel/blob/7a6808260a733d50983c1adf0cf5a7493472267f/src/main/java/com/google/devtools/build/lib/rules/cpp/LibrariesToLinkCollector.java#L319
for pattern in SHARED_LIBRARY_NAME_PATTERNS:
name_os = pattern % (cc_library_base, "")
native.genrule(
name = name_os + "_rule",
srcs = [":" + cc_library_name],
outs = [name_os],
cmd = "cp $< $@",
)
native.genrule(
name = "gen_" + cc_library_pyd_name,
srcs = [":" + cc_library_name],
outs = [cc_library_pyd_name],
cmd = "cp $< $@",
)
native.py_library(
name = name,
srcs = [":" + name + ".py"],
srcs_version = "PY2AND3",
data = select({
clean_dep("//tensorflow:windows"): [":" + cc_library_pyd_name],
"//conditions:default": [":" + cc_library_name],
}),
)
# This macro is for running python tests against system installed pip package
# on Windows.
#
# py_test is built as an executable python zip file on Windows, which contains all
# dependencies of the target. Because of the C++ extensions, it would be very
# inefficient if the py_test zips all runfiles, plus we don't need them when running
# tests against the system-installed pip package. So we'd like to get rid of the deps
# of py_test in this case.
#
# In order to trigger the tests without bazel clean after getting rid of deps,
# we introduce the following:
# 1. When --define=no_tensorflow_py_deps=true, the py_test depends on a marker
#    file of the pip package, so the test is rerun when the pip package changes.
# Note that this only works on Windows. See the definition of
# //third_party/tensorflow/tools/pip_package:win_pip_package_marker for specific reasons.
# 2. When --define=no_tensorflow_py_deps=false (by default), it's a normal py_test.
def py_test(deps = [], data = [], kernels = [], **kwargs):
# Python version placeholder
native.py_test(
        # TODO(jlebar): Ideally we'd use tcmalloc here.
deps = select({
"//conditions:default": deps,
clean_dep("//tensorflow:no_tensorflow_py_deps"): [],
}),
data = data + select({
"//conditions:default": kernels,
clean_dep("//tensorflow:no_tensorflow_py_deps"): ["//tensorflow/tools/pip_package:win_pip_package_marker"],
}),
exec_properties = tf_exec_properties(kwargs),
**kwargs
)
register_extension_info(
extension_name = "py_test",
label_regex_for_dep = "{extension_name}",
)
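# Illustrative only: the two build modes described above can be exercised from
# the command line roughly like this (the test target name is hypothetical):
#
#   bazel test //tensorflow/python:example_py_test
#   bazel test --define=no_tensorflow_py_deps=true //tensorflow/python:example_py_test
#
# In the second invocation (Windows-only, per the notes above) the test drops
# its Python deps and instead depends on the pip package marker file, so it is
# rerun whenever the installed pip package changes.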
# Similar to py_test above, this macro is used to exclude dependencies for some py_binary
# targets in order to reduce the size of //tensorflow/tools/pip_package:simple_console_windows.
# See https://github.com/tensorflow/tensorflow/issues/22390
def py_binary(name, deps = [], **kwargs):
# Add an extra target for dependencies to avoid nested select statement.
native.py_library(
name = name + "_deps",
deps = deps,
)
# Python version placeholder
native.py_binary(
name = name,
deps = select({
"//conditions:default": [":" + name + "_deps"],
clean_dep("//tensorflow:no_tensorflow_py_deps"): [],
}),
**kwargs
)
register_extension_info(
extension_name = "py_binary",
label_regex_for_dep = "{extension_name}",
)
def tf_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
tags = [],
shard_count = 1,
additional_visibility = [],
kernels = [],
flaky = 0,
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
**kwargs):
"""Create one or more python tests with extra tensorflow dependencies."""
xla_test_true_list = []
if "additional_deps" in kwargs:
fail("Use `deps` to specify dependencies. `additional_deps` has been replaced with the standard pattern of `deps`.")
deps = kwargs.pop("deps", [])
# xla_enable_strict_auto_jit is used to run Tensorflow unit tests with all XLA compilable
# kernels compiled with XLA.
if xla_enable_strict_auto_jit:
xla_enabled = True
xla_test_true_list += ["//tensorflow/python:is_xla_test_true"]
if xla_enabled:
deps = deps + tf_additional_xla_deps_py()
if grpc_enabled:
deps = deps + tf_additional_grpc_deps_py()
# NOTE(ebrevdo): This is a workaround for depset() not being able to tell
# the difference between 'dep' and 'clean_dep(dep)'.
for to_add in [
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/python:gradient_checker",
]:
if to_add not in deps and clean_dep(to_add) not in deps:
deps.append(clean_dep(to_add))
# Python version placeholder
kwargs.setdefault("srcs_version", "PY2AND3")
py_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = tags,
visibility = [clean_dep("//tensorflow:internal")] +
additional_visibility,
deps = depset(deps + xla_test_true_list),
**kwargs
)
register_extension_info(
extension_name = "tf_py_test",
label_regex_map = {"deps": "deps:{extension_name}"},
)
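# Illustrative only: a minimal tf_py_test invocation as it might appear in a
# BUILD file (target, source, and dependency names below are placeholders):
#
#   tf_py_test(
#       name = "example_ops_test",
#       size = "small",
#       srcs = ["example_ops_test.py"],
#       deps = ["//tensorflow/python:client_testlib"],
#   )
#
# The macro appends extra_py_tests_deps and gradient_checker automatically and
# forwards everything to the py_test wrapper defined above.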
def gpu_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
shard_count = 1,
kernels = [],
tags = [],
flaky = 0,
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
**kwargs):
# TODO(b/122522101): Don't ignore xla_enable_strict_auto_jit and enable additional
# XLA tests once enough compute resources are available.
_ignored = [xla_enable_strict_auto_jit]
if main == None:
main = name + ".py"
if "additional_deps" in kwargs:
fail("Use `deps` to specify dependencies. `additional_deps` has been replaced with the standard pattern of `deps`.")
for config in ["cpu", "gpu"]:
test_name = name
test_tags = tags
if config == "gpu":
test_name += "_gpu"
test_tags = test_tags + tf_gpu_tests_tags()
tf_py_test(
name = test_name,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = False,
**kwargs
)
register_extension_info(
extension_name = "gpu_py_test",
label_regex_map = {"deps": "deps:{extension_name}"},
)
# terminology changes: saving cuda_* definition for compatibility
def cuda_py_test(*args, **kwargs):
gpu_py_test(*args, **kwargs)
register_extension_info(
extension_name = "cuda_py_test",
label_regex_map = {"deps": "deps:{extension_name}"},
)
def sycl_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
shard_count = 1,
kernels = [],
tags = [],
flaky = 0,
xla_enabled = False,
grpc_enabled = False,
**kwargs):
test_tags = tags + tf_sycl_tests_tags()
if "additional_deps" in kwargs:
fail("Use `deps` to specify dependencies. `additional_deps` has been replaced with the standard pattern of `deps`.")
tf_py_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
**kwargs
)
register_extension_info(
extension_name = "sycl_py_test",
label_regex_map = {"deps": "deps:{extension_name}"},
)
def py_tests(
name,
srcs,
size = "medium",
kernels = [],
data = [],
tags = [],
shard_count = 1,
prefix = "",
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
**kwargs):
if "additional_deps" in kwargs:
fail("Use `deps` to specify dependencies. `additional_deps` has been replaced with the standard pattern of `deps`.")
for src in srcs:
test_name = src.split("/")[-1].split(".")[0]
if prefix:
test_name = "%s_%s" % (prefix, test_name)
tf_py_test(
name = test_name,
size = size,
srcs = [src],
data = data,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = src,
shard_count = shard_count,
tags = tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = xla_enable_strict_auto_jit,
**kwargs
)
def gpu_py_tests(
name,
srcs,
size = "medium",
kernels = [],
data = [],
shard_count = 1,
tags = [],
prefix = "",
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
**kwargs):
# TODO(b/122522101): Don't ignore xla_enable_strict_auto_jit and enable additional
# XLA tests once enough compute resources are available.
_ignored = [xla_enable_strict_auto_jit]
test_tags = tags + tf_gpu_tests_tags()
if "additional_deps" in kwargs:
fail("Use `deps` to specify dependencies. `additional_deps` has been replaced with the standard pattern of `deps`.")
py_tests(
name = name,
size = size,
srcs = srcs,
data = data,
grpc_enabled = grpc_enabled,
kernels = kernels,
prefix = prefix,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = False,
**kwargs
)
# terminology changes: saving cuda_* definition for compatibility
def cuda_py_tests(*args, **kwargs):
gpu_py_tests(*args, **kwargs)
# Creates a genrule named <name>_srcs that runs tools/proto_text's generator to
# make the proto_text functions for the protos passed in <srcs>.
#
# Also creates a <name>_hdrs filegroup for the generated headers and a <name>
# cc_library that compiles the generated sources.
def tf_generate_proto_text_sources(name, srcs_relative_dir, srcs, protodeps = [], deps = [], visibility = None):
out_hdrs = (
[
p.replace(".proto", ".pb_text.h")
for p in srcs
] + [p.replace(".proto", ".pb_text-impl.h") for p in srcs]
)
out_srcs = [p.replace(".proto", ".pb_text.cc") for p in srcs]
native.genrule(
name = name + "_srcs",
srcs = srcs + protodeps + [clean_dep("//tensorflow/tools/proto_text:placeholder.txt")],
outs = out_hdrs + out_srcs,
visibility = visibility,
cmd =
"$(location //tensorflow/tools/proto_text:gen_proto_text_functions) " +
"$(@D) " + srcs_relative_dir + " $(SRCS)",
exec_tools = [
clean_dep("//tensorflow/tools/proto_text:gen_proto_text_functions"),
],
)
native.filegroup(
name = name + "_hdrs",
srcs = out_hdrs,
visibility = visibility,
)
cc_library(
name = name,
srcs = out_srcs,
hdrs = out_hdrs,
visibility = visibility,
deps = deps,
alwayslink = 1,
)
def tf_genrule_cmd_append_to_srcs(to_append):
return ("cat $(SRCS) > $(@) && " + "echo >> $(@) && " + "echo " + to_append +
" >> $(@)")
def tf_version_info_genrule(name, out):
native.genrule(
name = name,
srcs = [
clean_dep("@local_config_git//:gen/spec.json"),
clean_dep("@local_config_git//:gen/head"),
clean_dep("@local_config_git//:gen/branch_ref"),
],
outs = [out],
cmd =
"$(location //tensorflow/tools/git:gen_git_source) --generate $(SRCS) \"$@\" --git_tag_override=$${GIT_TAG_OVERRIDE:-}",
local = 1,
exec_tools = [clean_dep("//tensorflow/tools/git:gen_git_source")],
)
def tf_py_build_info_genrule(name, out, **kwargs):
native.genrule(
name = name,
outs = [out],
cmd =
"$(location //tensorflow/tools/build_info:gen_build_info) --raw_generate \"$@\" " +
" --is_config_cuda " + if_cuda("True", "False") +
" --is_config_rocm " + if_rocm("True", "False") +
" --key_value " +
if_cuda(" cuda_version_number=$${TF_CUDA_VERSION:-} cudnn_version_number=$${TF_CUDNN_VERSION:-} ", "") +
if_windows(" msvcp_dll_names=msvcp140.dll,msvcp140_1.dll ", "") +
if_windows_cuda(" ".join([
"nvcuda_dll_name=nvcuda.dll",
"cudart_dll_name=cudart64_$$(echo $${TF_CUDA_VERSION:-} | sed \"s/\\.//\").dll",
"cudnn_dll_name=cudnn64_$${TF_CUDNN_VERSION:-}.dll",
]), ""),
local = 1,
exec_tools = [clean_dep("//tensorflow/tools/build_info:gen_build_info")],
**kwargs
)
def cc_library_with_android_deps(
deps,
android_deps = [],
common_deps = [],
copts = tf_copts(),
**kwargs):
deps = if_not_android(deps) + if_android(android_deps) + common_deps
cc_library(deps = deps, copts = copts, **kwargs)
register_extension_info(
extension_name = "cc_library_with_android_deps",
label_regex_for_dep = "{extension_name}",
)
def tensorflow_opensource_extra_deps():
return []
# buildozer: disable=function-docstring-args
def pybind_extension(
name,
srcs,
module_name,
hdrs = [],
features = [],
srcs_version = "PY2AND3",
data = [],
copts = [],
linkopts = [],
deps = [],
defines = [],
visibility = None,
testonly = None,
licenses = None,
compatible_with = None,
restricted_to = None,
deprecation = None,
link_in_framework = False):
"""Builds a generic Python extension module."""
_ignore = [module_name]
p = name.rfind("/")
if p == -1:
sname = name
prefix = ""
else:
sname = name[p + 1:]
prefix = name[:p + 1]
so_file = "%s%s.so" % (prefix, sname)
pyd_file = "%s%s.pyd" % (prefix, sname)
symbol = "init%s" % sname
symbol2 = "init_%s" % sname
symbol3 = "PyInit_%s" % sname
exported_symbols_file = "%s-exported-symbols.lds" % name
version_script_file = "%s-version-script.lds" % name
native.genrule(
name = name + "_exported_symbols",
outs = [exported_symbols_file],
cmd = "echo '_%s\n_%s\n_%s' >$@" % (symbol, symbol2, symbol3),
output_licenses = ["unencumbered"],
visibility = ["//visibility:private"],
testonly = testonly,
)
native.genrule(
name = name + "_version_script",
outs = [version_script_file],
cmd = "echo '{global:\n %s;\n %s;\n %s;\n local: *;};' >$@" % (symbol, symbol2, symbol3),
output_licenses = ["unencumbered"],
visibility = ["//visibility:private"],
testonly = testonly,
)
# If we are to link to libtensorflow_framework.so, add
# it as a source.
if link_in_framework:
srcs += tf_binary_additional_srcs()
cc_binary(
name = so_file,
srcs = srcs + hdrs,
data = data,
copts = copts + [
"-fno-strict-aliasing",
"-fexceptions",
] + select({
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-fvisibility=hidden",
],
}),
linkopts = linkopts + _rpath_linkopts(name) + select({
"@local_config_cuda//cuda:darwin": [
"-Wl,-exported_symbols_list,$(location %s)" % exported_symbols_file,
],
clean_dep("//tensorflow:windows"): [],
"//conditions:default": [
"-Wl,--version-script",
"$(location %s)" % version_script_file,
],
}),
deps = deps + [
exported_symbols_file,
version_script_file,
],
defines = defines,
features = features + ["-use_header_modules"],
linkshared = 1,
testonly = testonly,
licenses = licenses,
visibility = visibility,
deprecation = deprecation,
restricted_to = restricted_to,
compatible_with = compatible_with,
)
native.genrule(
name = name + "_pyd_copy",
srcs = [so_file],
outs = [pyd_file],
cmd = "cp $< $@",
output_to_bindir = True,
visibility = visibility,
deprecation = deprecation,
restricted_to = restricted_to,
compatible_with = compatible_with,
)
native.py_library(
name = name,
data = select({
"@org_tensorflow//tensorflow:windows": [pyd_file],
"//conditions:default": [so_file],
}),
srcs_version = srcs_version,
licenses = licenses,
testonly = testonly,
visibility = visibility,
deprecation = deprecation,
restricted_to = restricted_to,
compatible_with = compatible_with,
)
# buildozer: enable=function-docstring-args
def tf_python_pybind_extension(
name,
srcs,
module_name,
features = [],
copts = [],
hdrs = [],
deps = [],
defines = [],
visibility = None):
"""A wrapper macro for pybind_extension that is used in tensorflow/python/BUILD.
Please do not use it anywhere else as it may behave unexpectedly. b/146445820
It is used for targets under //third_party/tensorflow/python that link
against libtensorflow_framework.so and pywrap_tensorflow_internal.so.
"""
pybind_extension(
name,
srcs,
module_name,
features = features,
copts = copts,
hdrs = hdrs,
deps = deps + tf_binary_pybind_deps() + mkl_deps(),
defines = defines,
visibility = visibility,
link_in_framework = True,
)
def tf_pybind_cc_library_wrapper(name, deps, visibility = None):
"""Wrapper for cc_library and proto dependencies used by tf_python_pybind_extension.
This wrapper ensures that cc libraries' and protos' headers are made
available to pybind code, without creating ODR violations in the dynamically
    linked case. The symbols in these deps should be linked to, and
exported by, the core pywrap_tensorflow_internal.so
"""
cc_header_only_library(name = name, deps = deps, visibility = visibility)
def if_cuda_or_rocm(if_true, if_false = []):
"""Shorthand for select()'ing whether to build for either CUDA or ROCm.
Returns a select statement which evaluates to
if_true if we're building with either CUDA or ROCm enabled.
if_false, otherwise.
    Sometimes a target has additional CUDA or ROCm specific dependencies.
    The `if_cuda` / `if_rocm` functions are used to specify these additional
    dependencies. For example, see the `//tensorflow/core/kernels:bias_op` target.
    If the same additional dependency is needed for both CUDA and ROCm
    (e.g. the `reduction_ops` dependency for the `bias_op` target above),
    then specifying that dependency in both `if_cuda` and `if_rocm` will
result in both those functions returning a select statement, which contains
the same dependency, which then leads to a duplicate dependency bazel error.
In order to work around this error, any additional dependency that is common
to both the CUDA and ROCm platforms, should be specified using this function.
Doing so will eliminate the cause of the bazel error (i.e. the same
dependency showing up in two different select statements)
"""
return select({
"@local_config_cuda//cuda:using_nvcc": if_true,
"@local_config_cuda//cuda:using_clang": if_true,
"@local_config_rocm//rocm:using_hipcc": if_true,
"//conditions:default": if_false,
})
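# Illustrative only: instead of listing the same dependency in both if_cuda()
# and if_rocm() (which triggers Bazel's duplicate-dependency error described in
# the docstring), a target can use if_cuda_or_rocm once, e.g. with the
# documented example dependency:
#
#   deps = if_cuda_or_rocm([
#       "//tensorflow/core/kernels:reduction_ops",
#   ])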
def tf_monitoring_deps():
return []
def tf_jit_compilation_passes_extra_deps():
return []
def if_mlir(if_true, if_false = []):
return select({
str(Label("//tensorflow:with_mlir_support")): if_true,
"//conditions:default": if_false,
})
def tfcompile_extra_flags():
return ""
def tf_external_workspace_visible(visibility):
# External workspaces can see this target.
return ["//visibility:public"]
def _filegroup_as_file_impl(ctx):
out = ctx.actions.declare_file(ctx.label.name)
ctx.actions.write(
output = out,
content = "\n".join([f.short_path for f in ctx.files.dep]),
)
return DefaultInfo(files = depset([out]))
_filegroup_as_file = rule(
implementation = _filegroup_as_file_impl,
attrs = {
"dep": attr.label(),
},
)
def filegroup_as_file(name, dep, visibility = []):
"""Creates a filegroup ${name}_file which contains the file ${name}."""
_filegroup_as_file(name = name, dep = dep)
native.filegroup(
name = name + "_file",
srcs = [name],
visibility = visibility,
)
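# Illustrative only: filegroup_as_file writes the short paths of a dependency's
# files into a generated text file. A hypothetical invocation (names are
# placeholders):
#
#   filegroup_as_file(
#       name = "example_file_list",
#       dep = ":example_data",
#   )
#
# This yields ":example_file_list" (the generated text file) and the
# ":example_file_list_file" filegroup wrapping it.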
|
the-stack_0_26999
|
# Grid-walking puzzle: follow turn-and-step instructions on a 2D grid and
# report taxicab (Manhattan) distances from the origin.
inp = open('../input/1.txt', 'r').read().split(', ')
positions = set()  # every grid cell visited so far
tp = []  # cells visited more than once, in order of revisit
# direc indexes the current heading; trans maps a heading to its (dx, dy) step
direc, pos, trans = (0, [0, 0], {0: [0, 1], 1: [1, 0], 2: [0, -1], 3: [-1, 0]})
for x in inp:
    # The 'L'/'R' prefix turns 90 degrees; the rest of the token is the step count.
    direc = (direc + 1) % 4 if x[0] == 'L' else (direc - 1) % 4
    for j in range(int(x[1:])):
        pos = [e + (trans[direc][i]) for i, e in enumerate(pos)]
        if tuple(pos) in positions:
            tp.append(pos)
        positions.add(tuple(pos))
# Distances of all revisited cells, plus the distance of the final position.
dists = ([sum([abs(i) for i in x]) for x in tp], sum([abs(i) for i in pos]))
print(dists)
|
the-stack_0_27000
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change PACKAGE_NAME only; the folder path and namespace below are derived from it.
PACKAGE_NAME = "azure-mgmt-storage"
PACKAGE_PPRINT_NAME = "Storage Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired by 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.5.0',
'azure-common~=1.1',
'azure-mgmt-core>=1.0.0,<2.0.0',
],
extras_require={
":python_version<'3.0'": ['azure-mgmt-nspkg'],
}
)
|
the-stack_0_27002
|
import copy
import re
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16
from .. import builder
from ...core.ops import rsc, NormRegularizer, balance_losses
class EvalModeSetter:
def __init__(self, module, m_type):
self.module = module
self.modes_storage = dict()
self.m_types = m_type
if not isinstance(self.m_types, (tuple, list)):
self.m_types = [self.m_types]
def __enter__(self):
for name, module in self.module.named_modules():
matched = any(isinstance(module, m_type) for m_type in self.m_types)
if matched:
self.modes_storage[name] = module.training
module.train(mode=False)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for name, module in self.module.named_modules():
if name in self.modes_storage:
module.train(mode=self.modes_storage[name])
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
"""Base class for recognizers.
All recognizers should subclass it.
    All subclasses should overwrite:
- Methods:``reshape_images``, supporting the input reshape.
Args:
backbone (dict): Backbone modules to extract feature.
reducer (dict): Spatial-temporal modules to reduce feature. Default: None.
cls_head (dict): Classification head to process feature.
class_sizes (list): Number of samples for each class in each task. Default: None.
train_cfg (dict): Config for training. Default: None.
test_cfg (dict): Config for testing. Default: None.
bn_eval (bool): Whether to switch all BN in eval mode. Default: False.
bn_frozen (bool): Whether to disable backprop for all BN. Default: False.
"""
def __init__(self,
backbone,
cls_head,
reducer=None,
class_sizes=None,
class_maps=None,
train_cfg=None,
test_cfg=None,
bn_eval=False,
bn_frozen=False,
reg_cfg=None):
super().__init__()
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.fp16_enabled = False
self.multi_head = class_sizes is not None and len(class_sizes) > 1
self.with_self_challenging = hasattr(train_cfg, 'self_challenging') and train_cfg.self_challenging.enable
self.with_clip_mixing = hasattr(train_cfg, 'clip_mixing') and train_cfg.clip_mixing.enable
self.with_loss_norm = hasattr(train_cfg, 'loss_norm') and train_cfg.loss_norm.enable
self.with_sample_filtering = hasattr(train_cfg, 'sample_filtering') and train_cfg.sample_filtering.enable
if class_maps is None:
num_classes = cls_head.num_classes
self.CLASSES = {0: {ii: ii for ii in range(num_classes)}}
else:
self.CLASSES = copy.deepcopy(class_maps)
self.train_meta = {}
self.backbone = builder.build_backbone(backbone)
self.spatial_temporal_module = builder.build_reducer(reducer)
self.cls_head = builder.build_head(cls_head, class_sizes)
if self.with_clip_mixing:
self.clip_mixing_loss = builder.build_loss(dict(
type='ClipMixingLoss',
mode=train_cfg.clip_mixing.mode,
loss_weight=train_cfg.clip_mixing.weight
))
self.losses_meta = None
if self.with_loss_norm:
assert 0.0 < train_cfg.loss_norm.gamma <= 1.0
self.losses_meta = dict()
self.regularizer = NormRegularizer(**reg_cfg) if reg_cfg is not None else None
self.init_weights()
def init_weights(self):
for module in self.children():
if hasattr(module, 'init_weights'):
module.init_weights()
heads = self.cls_head if self.multi_head else [self.cls_head]
for head in heads:
if hasattr(head, 'init_weights'):
head.init_weights()
def update_state(self, *args, **kwargs):
for module in self.children():
if hasattr(module, 'update_state'):
module.update_state(*args, **kwargs)
heads = self.cls_head if self.multi_head else [self.cls_head]
for head in heads:
if hasattr(head, 'update_state'):
head.update_state(*args, **kwargs)
@auto_fp16()
def _forward_module_train(self, module, x, losses, squeeze=False, **kwargs):
if module is None:
y = x
elif hasattr(module, 'loss'):
y, extra_data = module(x, return_extra_data=True)
losses.update(module.loss(**extra_data, **kwargs))
else:
y = module(x)
if squeeze and isinstance(y, (list, tuple)):
assert len(y) == 1
y = y[0]
return y
@auto_fp16()
def _extract_features_test(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
y = self.backbone(imgs)
if isinstance(y, (list, tuple)):
assert len(y) == 1
y = y[0]
if self.spatial_temporal_module is not None:
y = self.spatial_temporal_module(y)
return y
def _average_clip(self, cls_score):
"""Averaging class score over multiple clips.
        Using different averaging types ('score' or 'prob' or None,
        which are defined in test_cfg) to compute the final averaged
        class score.
        Args:
            cls_score (torch.Tensor): Class score to be averaged.
        Returns:
torch.Tensor: Averaged class score.
"""
if 'average_clips' not in self.test_cfg.keys():
raise KeyError('"average_clips" must defined in test_cfg\'s keys')
average_clips = self.test_cfg['average_clips']
if average_clips not in ['score', 'prob', None]:
raise ValueError(f'{average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", None]')
if average_clips == 'prob':
cls_score = F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True)
elif average_clips == 'score':
cls_score = cls_score.mean(dim=0, keepdim=True)
return cls_score
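    # Illustrative only (not part of the original class): with two clips and
    # three classes, e.g. cls_score = tensor([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]]),
    # average_clips='score' returns tensor([[1.0, 1.0, 0.0]]) (mean of raw
    # scores), while average_clips='prob' first applies softmax per clip and
    # then averages, so the result stays a valid probability distribution.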
@abstractmethod
def reshape_input(self, imgs, masks=None):
pass
@abstractmethod
def reshape_input_inference(self, imgs, masks=None):
pass
@staticmethod
def _infer_head(head_module, *args, **kwargs):
out = head_module(*args, **kwargs)
if isinstance(out, (tuple, list)):
assert len(out) == 3
return out
else:
return out, None, None
@staticmethod
def _filter(x, mask):
if x is None:
return None
elif mask is None:
return x
elif isinstance(x, (tuple, list)):
return [_x[mask] for _x in x]
else:
return x[mask]
def forward_train(self, imgs, labels, dataset_id=None, attention_mask=None, **kwargs):
imgs, attention_mask, head_args = self.reshape_input(imgs, attention_mask)
losses = dict()
num_clips = imgs.size(0) // labels.size(0)
if num_clips > 1:
labels = labels.view(-1, 1).repeat(1, num_clips).view(-1)
if dataset_id is not None:
dataset_id = dataset_id.view(-1, 1).repeat(1, num_clips).view(-1)
features = self._forward_module_train(
self.backbone, imgs, losses,
squeeze=True, attention_mask=attention_mask
)
features = self._forward_module_train(
self.spatial_temporal_module, features, losses
)
if self.with_self_challenging and not features.requires_grad:
features.requires_grad = True
if self.with_sample_filtering:
pred_labels = torch.zeros_like(labels.view(-1))
heads = self.cls_head if self.multi_head else [self.cls_head]
for head_id, cl_head in enumerate(heads):
trg_mask = (dataset_id == head_id).view(-1) if dataset_id is not None else None
trg_labels = self._filter(labels, trg_mask)
trg_num_samples = trg_labels.numel()
if trg_num_samples == 0:
continue
if self.with_self_challenging:
trg_features = self._filter(features, trg_mask)
trg_main_scores, _, _ = self._infer_head(
cl_head,
*([trg_features] + head_args),
labels=trg_labels.view(-1)
)
trg_features = rsc(
trg_features,
trg_main_scores,
trg_labels, 1.0 - self.train_cfg.self_challenging.drop_p
)
with EvalModeSetter(cl_head, m_type=(nn.BatchNorm2d, nn.BatchNorm3d)):
trg_main_scores, trg_norm_embd, trg_extra_scores = self._infer_head(
cl_head,
*([trg_features] + head_args),
labels=trg_labels.view(-1),
return_extra_data=True
)
else:
all_main_scores, all_norm_embd, all_extra_scores = self._infer_head(
cl_head,
*([features] + head_args),
labels=labels.view(-1),
return_extra_data=True
)
trg_main_scores = self._filter(all_main_scores, trg_mask)
trg_extra_scores = self._filter(all_extra_scores, trg_mask)
trg_norm_embd = self._filter(all_norm_embd, trg_mask)
# main head loss
losses.update(cl_head.loss(
main_cls_score=trg_main_scores,
extra_cls_score=trg_extra_scores,
labels=trg_labels.view(-1),
norm_embd=trg_norm_embd,
name=str(head_id)
))
# clip mixing loss
if self.with_clip_mixing:
losses['loss/clip_mix' + str(head_id)] = self.clip_mixing_loss(
trg_main_scores, trg_norm_embd, num_clips, cl_head.last_scale
)
if self.with_sample_filtering:
with torch.no_grad():
pred_labels[trg_mask] = torch.argmax(trg_main_scores, dim=1)
if self.regularizer is not None:
losses['loss/reg'] = self.regularizer(self.backbone)
if self.with_sample_filtering:
self._add_train_meta_info(pred_labels=pred_labels, **kwargs)
return losses
def _add_train_meta_info(self, **kwargs):
for meta_name in ['pred_labels', 'sample_idx', 'clip_starts', 'clip_ends']:
assert meta_name in kwargs.keys(), f'There is no {meta_name} in meta info'
assert kwargs[meta_name] is not None, f'The value of {meta_name} is None'
self.train_meta[meta_name] = kwargs[meta_name].clone().view(-1).detach()
def forward_test(self, imgs, dataset_id=None):
"""Defines the computation performed at every call when evaluation and
testing."""
imgs, _, head_args = self.reshape_input(imgs)
y = self._extract_features_test(imgs)
if self.multi_head:
assert dataset_id is not None
head_outs = []
for cls_head in self.cls_head:
head_y = cls_head(y, *head_args)
head_out = self._average_clip(head_y)
head_outs.append(head_out.cpu().numpy())
out = []
dataset_id = dataset_id.view(-1).cpu().numpy()
for idx, head_id in enumerate(dataset_id):
out.extend(head_outs[head_id][idx].reshape([1, -1]))
else:
y = self.cls_head(y, *head_args)
out = self._average_clip(y).cpu().numpy()
return out
def forward_inference(self, imgs):
"""Used for computing network FLOPs and ONNX export.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
if self.multi_head:
raise NotImplementedError('Inference does not support multi-head architectures')
imgs, _, head_args = self.reshape_input_inference(imgs)
y = self._extract_features_test(imgs)
out = self.cls_head(y, *head_args)
return out
def forward(self, imgs, label=None, return_loss=True, dataset_id=None, **kwargs):
"""Define the computation performed at every call."""
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(imgs, label, dataset_id, **kwargs)
else:
return self.forward_test(imgs, dataset_id)
@staticmethod
def _parse_losses(losses, multi_head, enable_loss_norm=False, losses_meta=None, gamma=0.9):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
elif isinstance(loss_value, float):
log_vars[loss_name] = loss_value
else:
raise TypeError(f'{loss_name} is not a tensor or list of tensors')
if enable_loss_norm and losses_meta is not None:
loss_groups = defaultdict(list)
single_losses = []
for _key, _value in log_vars.items():
if 'loss' not in _key:
continue
end_digits_match = re.search(r'\d+$', _key)
if end_digits_match is None:
single_losses.append(_value)
else:
end_digits = end_digits_match.group()
loss_group_name = _key[:-len(end_digits)]
loss_groups[loss_group_name].append((_key, _value))
group_losses = []
for loss_group in loss_groups.values():
group_losses.extend(balance_losses(loss_group, losses_meta, gamma))
loss = sum(single_losses + group_losses)
else:
loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
if not isinstance(loss_value, torch.Tensor):
continue
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
if not multi_head or loss_name == 'loss':
loss_value = loss_value.clone().detach()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(
losses,
self.multi_head,
self.with_loss_norm,
self.losses_meta,
self.train_cfg.loss_norm.gamma if self.with_loss_norm else 0.9
)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(
losses,
self.multi_head,
self.with_loss_norm,
self.losses_meta,
self.train_cfg.loss_norm.gamma if self.with_loss_norm else 0.9
)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def train(self, train_mode=True):
super(BaseRecognizer, self).train(train_mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm3d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
return self
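# Illustrative note (added for clarity; not part of the original file): the
# `_parse_losses` helper sums every entry of the raw loss dict whose key
# contains 'loss' into the scalar used for back-propagation, while all other
# entries (e.g. accuracies) are only logged. With a hypothetical dict
# {'loss/cls0': 0.7, 'loss/clip_mix0': 0.1, 'acc/top1': 0.9}, log_vars['loss']
# becomes 0.8 and 'acc/top1' is reported unchanged.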
|
the-stack_0_27005
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Iterable
import numpy as np
from ... import opcodes as OperandDef
from ...core import ENTITY_TYPE, recursive_tile
from ...core.context import get_context
from ...serialization.serializables import KeyField, AnyField, StringField, BoolField
from ...utils import has_unknown_shape
from ..datasource import tensor as astensor
from ..base import moveaxis, where
from ..indexing import take
from ..arithmetic import isnan, add
from ..reduction import any as tensor_any
from ..operands import TensorOperand, TensorOperandMixin
from ..core import TENSOR_TYPE, TENSOR_CHUNK_TYPE, TensorOrder
from ..utils import check_out_param
from ..array_utils import as_same_device, device
from .core import _ureduce
def _quantile_is_valid(q):
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if q[i] < 0.0 or q[i] > 1.0:
return False
else:
# faster than any()
if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0):
return False
return True
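# Examples (illustrative): _quantile_is_valid(np.array([0.25, 0.5, 0.75])) -> True,
# while _quantile_is_valid(np.array([-0.1])) and _quantile_is_valid(np.array([1.5]))
# both return False.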
def _quantile_ureduce_func(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
):
a = astensor(a)
out = astensor(out) if out is not None else None
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == "lower":
indices = np.floor(indices).astype(np.intp)
elif interpolation == "higher":
indices = np.ceil(indices).astype(np.intp)
elif interpolation == "midpoint":
indices = 0.5 * (np.floor(indices) + np.ceil(indices))
elif interpolation == "nearest":
indices = np.around(indices).astype(np.intp)
else:
assert interpolation == "linear"
# keep index as fraction and interpolate
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == np.intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = np.concatenate((indices, [-1]))
ap.partition(indices, axis=axis, need_align=True)
# ensure axis with q-th is first
ap = moveaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = np.floor(indices).astype(np.intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = np.concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1 - weights_above
weights_shape = [1] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(
np.concatenate((indices_below, indices_above)), axis=axis, need_align=True
)
# ensure axis with q-th is first
ap = moveaxis(ap, axis, 0)
weights_below = np.moveaxis(weights_below, axis, 0)
weights_above = np.moveaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with q-th is first
x1 = moveaxis(x1, axis, 0)
x2 = moveaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if isinstance(n, TENSOR_TYPE):
if zerod:
if ap.ndim == 1:
r.data = where(tensor_any(n), a.dtype.type(np.nan), r).data
if out is not None:
out.data = r.data
else:
r[:] = where(
tensor_any(n), where(n.squeeze(0), a.dtype.type(np.nan), r), r
)
else:
if r.ndim == 1:
r[:] = where(tensor_any(n), np.full(r.shape, a.dtype.type(np.nan)), r)
else:
r[:] = where(
tensor_any(n),
where(n.repeat(q.size, 0), a.dtype.type(np.nan), r),
r,
)
return r
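# Worked example (illustrative): for a sorted vector [1, 2, 3, 4] and q = 0.4,
# the fractional index is 0.4 * (4 - 1) = 1.2, so the interpolation modes give:
#   linear   -> 2 + 0.2 * (3 - 2) = 2.2
#   lower    -> 2    (index floor(1.2) = 1)
#   higher   -> 3    (index ceil(1.2) = 2)
#   nearest  -> 2    (index round(1.2) = 1)
#   midpoint -> 2.5  (average of the lower and higher values)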
q_error_msg = "Quantiles must be in the range [0, 1]"
class TensorQuantile(TensorOperand, TensorOperandMixin):
__slots__ = ("q_error_msg",)
_op_type_ = OperandDef.QUANTILE
_a = KeyField("a")
_q = AnyField("q")
_axis = AnyField("axis")
_out = KeyField("out")
_overwrite_input = BoolField("overwrite_input")
_interpolation = StringField("interpolation")
_keepdims = BoolField("keepdims")
def __init__(
self,
q=None,
axis=None,
out=None,
overwrite_input=None,
interpolation=None,
keepdims=None,
**kw,
):
self.q_error_msg = kw.pop("q_error_msg", q_error_msg)
super().__init__(
_q=q,
_axis=axis,
_interpolation=interpolation,
_out=out,
_overwrite_input=overwrite_input,
_keepdims=keepdims,
**kw,
)
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._a = self._inputs[0]
if isinstance(self._q, (TENSOR_TYPE, TENSOR_CHUNK_TYPE)):
self._q = self._inputs[1]
if isinstance(self._out, (TENSOR_TYPE, TENSOR_CHUNK_TYPE)):
self._out = self._inputs[-1]
@property
def a(self):
return self._a
@property
def q(self):
return self._q
@property
def axis(self):
return self._axis
@property
def out(self):
return self._out
@property
def overwrite_input(self):
return self._overwrite_input
@property
def interpolation(self):
return self._interpolation
@property
def keepdims(self):
return self._keepdims
def __call__(self, a, q=None, out=None):
shape = [self._q.size] if self._q.ndim > 0 else []
if self._axis is None:
exclude_axes = set(range(a.ndim))
elif isinstance(self._axis, tuple):
exclude_axes = set(self._axis)
else:
exclude_axes = {self._axis}
for ax, s in enumerate(a.shape):
if ax not in exclude_axes:
shape.append(s)
elif self._keepdims:
shape.append(1)
inputs = [a] if q is None else [a, q]
order = TensorOrder.C_ORDER
if out is not None:
inputs.append(out)
order = out.order
shape = out.shape
t = self.new_tensor(inputs, shape=tuple(shape), order=order)
if out is not None:
check_out_param(out, t, "same_kind")
out.data = t.data
return out
else:
return t
@classmethod
def _tile(cls, op, q):
r, k = _ureduce(
op.a,
func=_quantile_ureduce_func,
q=q,
axis=op.axis,
out=op.out,
overwrite_input=op.overwrite_input,
interpolation=op.interpolation,
)
if op.keepdims:
return r.reshape(q.shape + k)
else:
return r
@classmethod
def _tile_one_chunk(cls, op, q):
in_tensor = op.inputs[0]
out_tensor = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_op._q = q
chunk_inputs = [in_tensor.chunks[0]]
if op.out is not None:
chunk_inputs.append(op.out.chunks[0])
chunk = chunk_op.new_chunk(
chunk_inputs,
shape=out_tensor.shape,
index=(0,) * out_tensor.ndim,
order=out_tensor.order,
)
op = op.copy()
return op.new_tensors(
op.inputs,
shape=out_tensor.shape,
order=out_tensor.order,
nsplits=tuple((s,) for s in out_tensor.shape),
chunks=[chunk],
)
@classmethod
def tile(cls, op):
if isinstance(op.q, TENSOR_TYPE):
# trigger execution of `q`
yield op.q.chunks
ctx = get_context()
# get q's data
q_chunk_keys = [c.key for c in op.q.chunks]
q_data = ctx.get_chunks_result(q_chunk_keys)
op._q = q = np.concatenate(q_data)
if not _quantile_is_valid(q):
raise ValueError(op.q_error_msg)
else:
if has_unknown_shape(*op.inputs):
yield
q = np.asarray(op.q)
if len(op.a.chunks) == 1 and (op.out is None or len(op.out.chunks) == 1):
return cls._tile_one_chunk(op, q)
else:
tiled = yield from recursive_tile(cls._tile(op, q))
return [tiled]
@classmethod
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True
)
a = inputs[0]
out = inputs[-1].copy() if op.out is not None else None
with device(device_id):
ctx[op.outputs[0].key] = xp.quantile(
a,
q=op.q,
axis=op.axis,
out=out,
interpolation=op.interpolation,
keepdims=op.keepdims,
)
INTERPOLATION_TYPES = {"linear", "lower", "higher", "midpoint", "nearest"}
def _quantile_unchecked(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
q_error_msg=None,
handle_non_numeric=None,
):
a = astensor(a)
raw_dtype = a.dtype
need_view_back = False
if handle_non_numeric and not np.issubdtype(a.dtype, np.number):
# enable handle_non_numeric is often used
# to handle the datetime-like dtype
a = a.astype("i8")
need_view_back = True
if isinstance(q, ENTITY_TYPE):
q = astensor(q)
# do check in tile
q_input = q
else:
q_input = None
if isinstance(axis, Iterable):
axis = tuple(axis)
if q.ndim > 1:
raise ValueError("`q` should be a scalar or array of float")
if out is not None and not isinstance(out, TENSOR_TYPE):
raise TypeError(f"`out` should be a tensor, got {type(out)}")
if interpolation not in INTERPOLATION_TYPES:
raise ValueError(
"interpolation can only be 'linear', 'lower' "
"'higher', 'midpoint', or 'nearest'"
)
# infer dtype
q_tiny = np.random.rand(2 if q.size % 2 == 0 else 1).astype(q.dtype)
if handle_non_numeric and not np.issubdtype(a.dtype, np.number):
dtype = a.dtype
else:
dtype = np.quantile(
np.empty(1, dtype=a.dtype), q_tiny, interpolation=interpolation
).dtype
op = TensorQuantile(
q=q,
axis=axis,
out=out,
overwrite_input=overwrite_input,
interpolation=interpolation,
keepdims=keepdims,
handle_non_numeric=handle_non_numeric,
q_error_msg=q_error_msg,
dtype=dtype,
gpu=a.op.gpu,
)
ret = op(a, q=q_input, out=out)
if need_view_back:
ret = ret.astype(raw_dtype)
return ret
def quantile(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
**kw,
):
"""
Compute the q-th quantile of the data along the specified axis.
Parameters
----------
a : array_like
Input tensor or object that can be converted to a tensor.
q : array_like of float
Quantile or sequence of quantiles to compute, which must be between
0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The
default is to compute the quantile(s) along a flattened
version of the tensor.
out : Tensor, optional
Alternative output tensor in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
Just for compatibility with Numpy, would not take effect.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original tensor `a`.
Returns
-------
quantile : scalar or Tensor
If `q` is a single quantile and `axis=None`, then the result
is a scalar. If multiple quantiles are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that tensor is
returned instead.
See Also
--------
mean
percentile : equivalent to quantile, but with q in the range [0, 100].
median : equivalent to ``quantile(..., 0.5)``
nanquantile
Notes
-----
Given a vector ``V`` of length ``N``, the q-th quantile of
``V`` is the value ``q`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the quantile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
same as the maximum if ``q=1.0``.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[10, 7, 4], [3, 2, 1]])
>>> a.execute()
array([[10, 7, 4],
[ 3, 2, 1]])
>>> mt.quantile(a, 0.5).execute()
3.5
>>> mt.quantile(a, 0.5, axis=0).execute()
array([6.5, 4.5, 2.5])
>>> mt.quantile(a, 0.5, axis=1).execute()
array([7., 2.])
>>> mt.quantile(a, 0.5, axis=1, keepdims=True).execute()
array([[7.],
[2.]])
>>> m = mt.quantile(a, 0.5, axis=0)
>>> out = mt.zeros_like(m)
>>> mt.quantile(a, 0.5, axis=0, out=out).execute()
array([6.5, 4.5, 2.5])
>>> m.execute()
array([6.5, 4.5, 2.5])
"""
handle_non_numeric = kw.pop("handle_non_numeric", None)
if len(kw) > 0: # pragma: no cover
raise TypeError(
"quantile() got an unexpected keyword " f"argument '{next(iter(kw))}'"
)
if not isinstance(q, ENTITY_TYPE):
q = np.asanyarray(q)
# do check instantly if q is not a tensor
if not _quantile_is_valid(q):
raise ValueError(q_error_msg)
return _quantile_unchecked(
a,
q,
axis=axis,
out=out,
overwrite_input=overwrite_input,
interpolation=interpolation,
keepdims=keepdims,
handle_non_numeric=handle_non_numeric,
)
|
the-stack_0_27007
|
import streamlit as st
from PIL import Image
import numpy as np
from cloudwine.inference import inference_tfidf, inference_docvec, inference_bert
from cloudwine.utils import show_metrics_graph, download_data, update_embedding, init_data, logger
# Start execution
def main():
# Download data from GCP bucket
download_data()
# Initialise the data module for the app
data_module = init_data()
# Determine app mode to run
app_mode = st.sidebar.selectbox("Choose the app mode",
["Run the app", "Data Exploration", "Model deep dive"])
if app_mode == "Run the app":
run_app(data_module)
elif app_mode == "Data Exploration":
run_data_analysis(data_module)
elif app_mode == "Model deep dive":
run_model_analysis()
# Main app
def run_app(data_module):
# Render mardown text
f = open("cloudwine/resources/intro.md", 'r')
readme_text = st.markdown(f.read())
f.close()
# Filters
# Select model to use
embed_model = st.sidebar.selectbox("Embedding model",
["BERT", "Doc2Vec", "TF-IDF"])
update_embedding(data_module, embed_model)
st.sidebar.header("Filters")
# Select varieties
df = data_module.data
variety_options = df.variety.value_counts().index.tolist()
# dataframe['name'].value_counts()[:n].index.tolist()
varieties = st.sidebar.multiselect( 'Wine Variety',
variety_options)
price_range = st.sidebar.slider("Price Range ($)", 0, int(df.price.max()), (0, int(df.price.max())), 5)
# Apply filters
df_subset = apply_filters(df, varieties, price_range)
data_module.data_filtered = df_subset
# Main page
# Input description from user
user_input = st.text_area("Describe your favourite wine here")
if user_input:
st.table(perform_inference(data_module, user_input, embed_model))
else:
if varieties or (price_range != (0,df.price.max())):
st.table(df_subset[['title', 'description', 'variety', 'price']])
# Analysis app
def run_data_analysis(data_module):
df = data_module.data
st.image(load_image('cloudwine/resources/wine_reviews_image.jpg'), use_column_width=True)
st.title('"Wine Reviews" Dataset Analysis')
st.write('On this page we explore the Kaggle 2017 "Wine Reviews" dataset.')
# Dataframe samples
st.subheader("Sample of raw dataset")
st.write(df.head())
st.subheader("Features used in training")
st.table(df[['description', 'variety', 'region_1']].head(3))
# Description length histogram
st.subheader("Text Length")
hist_values = np.histogram(
df['description'].str.len().tolist(), bins=24)[0]
st.bar_chart(hist_values)
# Model analysis app
def run_model_analysis():
# f = open("cloudwine/resources/model_analysis.md", 'r')
# readme_text = st.markdown(f.read())
# f.close()
st.title('Model Evaluation')
st.markdown("This project explored three different Natural Language Processing (NLP) text vectorisation techniques:")
st.markdown("1. [Term Frequency-Inverse Document Frequency (TF-IDF)] (https://towardsdatascience.com/tf-idf-for-document-ranking-from-scratch-in-python-on-real-world-dataset-796d339a4089)")
st.markdown("2. [Doc2Vec] (https://medium.com/wisio/a-gentle-introduction-to-doc2vec-db3e8c0cce5e)")
st.markdown("3. [Bidirectional Encoder Representations from Transformers (BERT)] (https://towardsdatascience.com/word-embedding-using-bert-in-python-dd5a86c00342)")
st.subheader("Metric for sucess")
st.markdown("""So how do we determine which model gives the best vector representation?
The first step is to cluster the text vectors by creating a joint label of 'variety' and 'region', as these are the biggest influencers of taste.
As the embedding model improves and incorporates more semantic relationships between text, the intra-cluster cosine similarity will increase
(see [diversity metric](https://gab41.lab41.org/recommender-systems-its-not-all-about-the-accuracy-562c7dceeaff#.5exl13wqv)).""")
st.image(load_image('cloudwine/resources/metric_figure.png'), use_column_width=True)
st.subheader('Experimental Results')
st.write("""The BERT embedding outperformed the baseline TF-IDF model by over 100%. To see the different models in action, go to 'Run the App'
in the sidebar and select a model type in the dropdown.""")
show_metrics_graph()
# Returns dataframe subset with filters applied
def apply_filters(df, varieties, price_range):
df_subset = df.copy()
# Varieties selection
if varieties:
df_subset = df_subset[df_subset['variety'].isin(varieties)]
# Price range selection
df_subset = df_subset[(df_subset['price']>price_range[0]) & (df_subset['price']<price_range[1])]
return df_subset
# @st.cache
def perform_inference(data_module, user_input, embed_model):
# Display recommendations as table
if embed_model == 'BERT':
df_recommend = inference_bert(data_module, user_input)
elif embed_model == "Doc2Vec":
df_recommend = inference_docvec(data_module, user_input)
elif embed_model == "TF-IDF":
df_recommend = inference_tfidf(data_module, user_input)
return df_recommend[['title', 'description', 'variety', 'price', 'similarity']]
@st.cache
def load_image(path):
im =Image.open(path)
return im
if __name__ == "__main__":
# execute only if run as a script
main()
|
the-stack_0_27011
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def multirun_dependencies():
_maybe(
http_archive,
name = "bazel_skylib",
sha256 = "b5f6abe419da897b7901f90cbab08af958b97a8f3575b0d3dd062ac7ce78541f",
strip_prefix = "bazel-skylib-0.5.0",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/0.5.0.tar.gz"],
)
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
|
the-stack_0_27012
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetStatsURL(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``117``
- ID: ``0x812c2ae6``
Parameters:
peer: :obj:`InputPeer <pyrogram.raw.base.InputPeer>`
params: ``str``
dark (optional): ``bool``
Returns:
:obj:`StatsURL <pyrogram.raw.base.StatsURL>`
"""
__slots__: List[str] = ["peer", "params", "dark"]
ID = 0x812c2ae6
QUALNAME = "functions.messages.GetStatsURL"
def __init__(self, *, peer: "raw.base.InputPeer", params: str, dark: Union[None, bool] = None) -> None:
self.peer = peer # InputPeer
self.params = params # string
self.dark = dark # flags.0?true
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetStatsURL":
flags = Int.read(data)
dark = True if flags & (1 << 0) else False
peer = TLObject.read(data)
params = String.read(data)
return GetStatsURL(peer=peer, params=params, dark=dark)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 0) if self.dark is not None else 0
data.write(Int(flags))
data.write(self.peer.write())
data.write(String(self.params))
return data.getvalue()
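# Illustrative usage sketch (not part of the generated file; `input_peer` is a
# placeholder for an already-resolved InputPeer instance):
#
#   req = GetStatsURL(peer=input_peer, params="", dark=True)
#   raw_bytes = req.write()                          # starts with the 0x812c2ae6 ID
#   again = GetStatsURL.read(BytesIO(raw_bytes[4:])) # skip the 4-byte constructor ID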
|
the-stack_0_27014
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
""" Pre-made gateware that implements an ILA connection serial. """
from nmigen import Elaboratable, Module, Signal, Cat
from ...debug.ila import StreamILA, ILAFrontend
from ...stream import StreamInterface
from ..usb2.device import USBDevice
from ..usb2.request import USBRequestHandler, StallOnlyRequestHandler
from ..usb2.endpoints.stream import USBMultibyteStreamInEndpoint
from usb_protocol.types import USBRequestType
from usb_protocol.emitters import DeviceDescriptorCollection
from usb_protocol.emitters.descriptors import cdc
class USBIntegratedLogicAnalyzer(Elaboratable):
""" Pre-made gateware that presents a USB-connected ILA.
Samples are presented over a USB endpoint.
"""
BULK_ENDPOINT_NUMBER = 1
def __init__(self, *args, bus=None, delayed_connect=False, max_packet_size=512, **kwargs):
self._delayed_connect = delayed_connect
self._max_packet_size = max_packet_size
# Store our USB bus.
self._bus = bus
# Force the ILA's output into the USB domain.
kwargs['o_domain'] = 'usb'
# Create our core ILA, which we'll use later.
self.ila = StreamILA(*args, **kwargs)
#
# I/O port
#
# Copy some core parameters from our inner ILA.
self.signals = self.ila.signals
self.sample_width = self.ila.sample_width
self.sample_depth = self.ila.sample_depth
self.sample_rate = self.ila.sample_rate
self.sample_period = self.ila.sample_period
self.bits_per_sample = self.ila.bits_per_sample
self.bytes_per_sample = self.ila.bytes_per_sample
# Expose our ILA's trigger and status ports directly.
self.trigger = self.ila.trigger
self.sampling = self.ila.sampling
self.complete = self.ila.complete
def create_descriptors(self):
""" Create the descriptors we want to use for our device. """
descriptors = DeviceDescriptorCollection()
#
# We'll add the major components of the descriptors we we want.
# The collection we build here will be necessary to create a standard endpoint.
#
# We'll need a device descriptor...
with descriptors.DeviceDescriptor() as d:
d.idVendor = 0x16d0
d.idProduct = 0x05a5
d.iManufacturer = "LUNA"
d.iProduct = "Integrated Logic Analyzer"
d.iSerialNumber = "no serial"
d.bNumConfigurations = 1
# ... and a description of the USB configuration we'll provide.
with descriptors.ConfigurationDescriptor() as c:
with c.InterfaceDescriptor() as i:
i.bInterfaceNumber = 0
with i.EndpointDescriptor() as e:
e.bEndpointAddress = 0x80 | self.BULK_ENDPOINT_NUMBER
e.wMaxPacketSize = self._max_packet_size
return descriptors
def elaborate(self, platform):
m = Module()
m.submodules.ila = self.ila
# If we have a bus name rather than a bus object,
# request the bus from our platform.
if isinstance(self._bus, str):
self._bus = platform.request(self._bus)
# If we have no bus, grab the platform's default USB connection.
if self._bus is None:
self._bus = platform.request(platform.default_usb_connection)
m.submodules.usb = usb = USBDevice(bus=self._bus)
# Add our standard control endpoint to the device.
descriptors = self.create_descriptors()
usb.add_standard_control_endpoint(descriptors)
# Add a stream endpoint to our device.
stream_ep = USBMultibyteStreamInEndpoint(
endpoint_number=self.BULK_ENDPOINT_NUMBER,
max_packet_size=self._max_packet_size,
byte_width=self.ila.bytes_per_sample
)
usb.add_endpoint(stream_ep)
# Handle our connection criteria: we'll either connect immediately,
# or once sampling is done, depending on our _delayed_connect setting.
connect = Signal()
if self._delayed_connect:
with m.If(self.ila.complete):
m.d.usb += connect.eq(1)
else:
m.d.comb += connect.eq(1)
# Connect up our I/O and our ILA streams.
m.d.comb += [
stream_ep.stream .stream_eq(self.ila.stream),
usb.connect .eq(connect)
]
return m
class USBIntegratedLogicAnalyzerFrontend(ILAFrontend):
""" Frontend for USB-attached integrated logic analyzers.
Parameters
------------
delay: int
The number of seconds to wait before trying to connect.
ila: IntegratedLogicAnalyzer
The ILA object to work with.
"""
def __init__(self, *args, ila, delay=3, **kwargs):
import usb
import time
# If we have a connection delay, wait that long.
if delay:
time.sleep(delay)
# Create our USB connection the device
self._device = usb.core.find(idVendor=0x16d0, idProduct=0x5a5)
super().__init__(ila)
def _split_samples(self, all_samples):
""" Returns an iterator that iterates over each sample in the raw binary of samples. """
from luna.apollo.support.bits import bits
sample_width_bytes = self.ila.bytes_per_sample
# Iterate over each sample, and yield its value as a bits object.
for i in range(0, len(all_samples), sample_width_bytes):
raw_sample = all_samples[i:i + sample_width_bytes]
sample_length = len(Cat(self.ila.signals))
yield bits.from_bytes(raw_sample, length=sample_length, byteorder='little')
def _read_samples(self):
""" Reads a set of ILA samples, and returns them. """
sample_width_bytes = self.ila.bytes_per_sample
total_to_read = self.ila.sample_depth * sample_width_bytes
# Fetch all of our samples from the given device.
all_samples = self._device.read(0x81, total_to_read, timeout=0)
return list(self._split_samples(all_samples))
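# Illustrative usage sketch (assumes a LUNA platform with a default USB
# connection and two hypothetical signals; not part of the original file):
#
#   counter = Signal(16)
#   strobe  = Signal()
#   m.submodules.ila = ila = USBIntegratedLogicAnalyzer(
#       signals=[counter, strobe],
#       sample_depth=1024,
#   )
#   m.d.comb += ila.trigger.eq(strobe)
#
# On the host, USBIntegratedLogicAnalyzerFrontend(ila=ila) can then pull the
# captured samples over the bulk endpoint.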
|
the-stack_0_27015
|
import logging
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class ModelConfig:
""" Vanilla GAN Model Config """
latent_size = None
discriminator_first_hidden_size = None
discriminator_second_hidden_size = None
discriminator_dropout = None
generator_first_hidden_size = None
generator_second_hidden_size = None
generator_dropout = None
negative_slope = None
image_size = None
def __init__(self, config):
for k, v in config.items():
setattr(self, k, v)
if not self.latent_size:
logger.error("latent_size is not implemented")
raise NotImplementedError
if not self.discriminator_first_hidden_size:
logger.error("discriminator_first_hidden_size is not implemented")
raise NotImplementedError
if not self.discriminator_second_hidden_size:
logger.error("discriminator_second_hidden_size is not implemented")
raise NotImplementedError
if not self.discriminator_dropout:
logger.error("discriminator_dropout is not implemented")
raise NotImplementedError
if not self.generator_first_hidden_size:
logger.error("generator_first_hidden_size is not implemented")
raise NotImplementedError
if not self.generator_second_hidden_size:
logger.error("generator_second_hidden_size is not implemented")
raise NotImplementedError
if not self.generator_dropout:
logger.error("generator_dropout is not implemented")
raise NotImplementedError
if not self.negative_slope:
logger.error("negative_slope is not implemented")
raise NotImplementedError
if not self.image_size:
logger.error("image_size is not implemented")
raise NotImplementedError
class Generator(nn.Module):
"""
Generator with Linear Layers
input : Gaussian Random Noise z
output : Generated Image
"""
def __init__(self, config):
super(Generator, self).__init__()
assert int(config.image_size ** 0.5) ** 2 == config.image_size, "image size should be square number"
self.image_len = int(config.image_size ** 0.5)
self.generator = nn.Sequential(
nn.Linear(config.latent_size, config.generator_first_hidden_size),
nn.LeakyReLU(config.negative_slope),
nn.Dropout(config.generator_dropout),
nn.Linear(config.generator_first_hidden_size, config.generator_second_hidden_size),
nn.LeakyReLU(config.negative_slope),
nn.Dropout(config.generator_dropout),
nn.Linear(config.generator_second_hidden_size, config.image_size),
nn.Tanh()
)
self.apply(self.init_weights)
logger.info(f"number of total parameters for G: {sum(p.numel() for p in self.parameters())}")
logger.info(f"number of trainable parameters for G: {sum(p.numel() for p in self.parameters() if p.requires_grad)}")
@staticmethod
def init_weights(module):
if isinstance(module, nn.Linear):
torch.nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
module.bias.data.zero_()
def forward(self, x):
return self.generator(x).view(-1, 1, self.image_len, self.image_len)
class Discriminator(nn.Module):
"""
Discriminator with Linear Layers
input : Image
output : 0~1 float (0: Fake Image, 1: Real Image)
"""
def __init__(self, config):
super(Discriminator, self).__init__()
assert int(config.image_size ** 0.5) ** 2 == config.image_size, "Image Size should be Square Number"
self.image_size = config.image_size
self.discriminator = nn.Sequential(
nn.Linear(config.image_size, config.discriminator_first_hidden_size),
nn.LeakyReLU(config.negative_slope),
nn.Dropout(config.discriminator_dropout),
nn.Linear(config.discriminator_first_hidden_size, config.discriminator_second_hidden_size),
nn.LeakyReLU(config.negative_slope),
nn.Dropout(config.discriminator_dropout),
nn.Linear(config.discriminator_second_hidden_size, 1),
nn.Sigmoid()
)
self.apply(self.init_weights)
logger.info(f"number of parameters for D: {sum(p.numel() for p in self.parameters())}")
logger.info(f"number of trainable parameters for D: {sum(p.numel() for p in self.parameters() if p.requires_grad)}")
@staticmethod
def init_weights(module):
if isinstance(module, nn.Linear):
torch.nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
module.bias.data.zero_()
def forward(self, x):
return self.discriminator(x.view(-1, self.image_size))
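# Illustrative usage sketch (hypothetical hyper-parameter values; not part of
# the original file):
#
#   config = ModelConfig({
#       'latent_size': 100,
#       'discriminator_first_hidden_size': 512,
#       'discriminator_second_hidden_size': 256,
#       'discriminator_dropout': 0.3,
#       'generator_first_hidden_size': 256,
#       'generator_second_hidden_size': 512,
#       'generator_dropout': 0.3,
#       'negative_slope': 0.2,
#       'image_size': 784,  # 28 x 28, must be a square number
#   })
#   G, D = Generator(config), Discriminator(config)
#   z = torch.randn(16, config.latent_size)
#   fake = G(z)      # shape (16, 1, 28, 28), values in (-1, 1) from Tanh
#   score = D(fake)  # shape (16, 1), values in (0, 1) from Sigmoid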
|
the-stack_0_27016
|
"""
DatasetConfigManager.py
Author: Jan Zahalka ([email protected])
Handles loading the dataset config.
"""
from django.conf import settings
import json
import os
from data.BlackthornFeatures import BlackthornFeatures
from data.CollectionIndex import CollectionIndex
class DatasetConfigManager:
"""
Encapsulates all operations concerning dataset configs, which includes not
only loading and validating the JSON configs, but notably also loading the
datasets into the system during runtime.
This is due to the dataset loading procedure being so closely tied to
loading the dataset configs - whilst a decomposition into a
DatasetConfigManager and DatasetLoader class might make semantic sense, it
would be DatasetLoader calling DatasetConfigManager all the time, impairing
code compactness.
"""
datasets = None
DATASET_CONFIG_DIR = os.path.join(settings.BASE_DIR, "data/datasets")
DEFAULT_IMAGE_ORDERING_PATH = "image_ordering.json"
DEFAULT_IL_RAW_FEATURES_PATH = "ii20model/il_raw_features.h5"
DEFAULT_IL_FEATURES_PATH = "ii20model/il_features.npz"
DEFAULT_INDEX_FEATURES_PATH = "ii20model/index_features.h5"
DEFAULT_INDEX_DIR = "ii20model/index"
@classmethod
def load_dataset_config(cls, dataset_name):
"""
Loads the dataset config, ensuring it is in the correct format with
correct values. See the README file for the specs of how to format the
config.
Parameters
----------
dataset_name : str
The name of the dataset. The method will look for a
<dataset_name>.json config file in DATASET_CONFIG_DIR.
Returns
-------
dict
The dataset config dict with proper values.
"""
# Open the dataset config and parse it into JSON
dataset_config_path = os.path.join(cls.DATASET_CONFIG_DIR,
"%s.json" % dataset_name)
try:
with open(dataset_config_path, "r") as f:
dataset_config = json.loads(f.read())
# If config file not found, raise an error
except OSError:
err = ("Dataset config file '%s' not found."
% dataset_config_path)
raise DatasetConfigInvalidError(err)
# If config file an invalid JSON, raise an error
except json.decoder.JSONDecodeError:
err = ("Config file '%s' is not a valid JSON file."
% dataset_config_path)
raise DatasetConfigInvalidError(err)
# Fill in default values if there are optional entries missing
dataset_config_path = cls._fill_defaults(dataset_config)
# Validate that all the entries are correct
cls._validate_config(dataset_config)
return dataset_config
@classmethod
def load_datasets(cls):
"""
Loads the datasets into II-20 for use in the analytic session. This
method is called on II-20 start-up (hooked via data.__init__.py and
data.apps.py).
It goes over the configs in the dataset config dir, and loads in the
image ordering, IL features (BlackthornFeatures) and collection index
for all datasets that have the "load" flag set.
"""
cls.datasets = dict()
for dataset_config_file in os.listdir(cls.DATASET_CONFIG_DIR):
dataset_name = dataset_config_file.split(".")[0]
dataset_config = cls.load_dataset_config(dataset_name)
if dataset_config["load"]:
cls.datasets[dataset_name] = dataset_config
root_dir = dataset_config["root_dir"]
il_features_abs_path =\
os.path.join(root_dir, dataset_config["il_features_path"])
index_dir_abs_path =\
os.path.join(root_dir, dataset_config["index_dir"])
image_ordering_abs_path =\
os.path.join(root_dir, dataset_config["image_ordering"])
cls.datasets[dataset_name]["il_features"] =\
BlackthornFeatures(il_features_abs_path)
cls.datasets[dataset_name]["index"] =\
CollectionIndex(index_dir_abs_path)
with open(image_ordering_abs_path, "r") as f:
cls.datasets[dataset_name]["image_ordering"] =\
json.loads(f.read())
@classmethod
def loaded_datasets_list(cls):
"""
Produces a list of the names of the loaded and thus ready for
analytics.
Returns
-------
dict_keys
The names of the datasets loaded in II-20.
"""
return cls.datasets.keys()
@classmethod
def image_url(cls, dataset, image_idx):
"""
Constructs an image URL from the given image index.
Parameters
----------
dataset : str
The name of the dataset.
image_idx : int
The image index.
Returns
-------
str
The image URL.
"""
return os.path.join(settings.STATIC_URL, dataset,
cls.datasets[dataset]["image_ordering"][image_idx])
@classmethod
def n(cls, dataset):
"""
Gets the number of images in the dataset.
Parameters
----------
dataset : str
The name of the dataset.
Returns
-------
n : int
The number of images in the dataset.
"""
return cls.datasets[dataset]["il_features"].n
@classmethod
def index(cls, dataset):
"""
Gets the collection index of the given dataset.
Parameters
----------
dataset : str
The name of the dataset.
Returns
-------
data.CollectionIndex
The index of the dataset.
"""
return cls.datasets[dataset]["index"]
@classmethod
def il_features(cls, dataset):
"""
Gets the IL features extracted from the given dataset.
Parameters
----------
dataset : str
The name of the dataset.
Returns
-------
data.BlackthornFeatures
The dataset's IL features.
"""
return cls.datasets[dataset]["il_features"]
@classmethod
def _fill_defaults(cls, dataset_config):
"""
Goes over a dataset config and fills in the default values to all
optional parameters that were not explicitly filled in.
Parameters
----------
dataset_config : dict
The dataset config to be filled in.
Returns
-------
dict
The updated dataset config with the default values filled in
(if any).
"""
if "image_ordering" not in dataset_config:
dataset_config["image_ordering"] = cls.DEFAULT_IMAGE_ORDERING_PATH
if "il_raw_features_path" not in dataset_config:
dataset_config["il_raw_features_path"] =\
cls.DEFAULT_IL_RAW_FEATURES_PATH
if "il_features_path" not in dataset_config:
dataset_config["il_features_path"] = cls.DEFAULT_IL_FEATURES_PATH
if "index_features_path" not in dataset_config:
dataset_config["index_features_path"] =\
cls.DEFAULT_INDEX_FEATURES_PATH
if "index_dir" not in dataset_config:
dataset_config["index_dir"] = cls.DEFAULT_INDEX_DIR
if "il_n_processes" not in dataset_config:
dataset_config["il_n_processes"] =\
BlackthornFeatures.DEFAULT_N_PROCESSES
if "il_n_feat_per_image" not in dataset_config:
dataset_config["il_n_feat_per_image"] =\
BlackthornFeatures.DEFAULT_N_FEAT_PER_IMG
if "index_n_submat" not in dataset_config:
dataset_config["index_n_submat"] = CollectionIndex.DEFAULT_N_SUBMAT
return dataset_config
@classmethod
def _validate_config(cls, dataset_config):
"""
Validates the config values in the dataset config.
Parameters
----------
dataset_config : dict
A dataset config to be validated.
Raises
------
DatasetConfigInvalidError
Raised if there are invalid values in the dataset config.
"""
# Dataset root directory is a mandatory entry, check existence
# and validity
if "root_dir" not in dataset_config:
err = ("The 'root_dir' entry specifying the dataset root "
"directory is missing, but it is mandatory.")
raise DatasetConfigInvalidError(err)
if not os.path.isdir(dataset_config["root_dir"]):
err = ("The 'root_dir' entry does not point to "
"a valid directory.")
raise DatasetConfigInvalidError(err)
# The number of IL processes, number of compressed IL features, and
# number of PQ submatrices in index must all be positive integers
for par in ["il_n_processes", "il_n_feat_per_image", "index_n_submat"]:
err = ("The '%s' parameter in the dataset config JSON must be a "
"positive integer." % par)
try:
if dataset_config[par] <= 0: # Is zero or negative
raise DatasetConfigInvalidError(err)
except TypeError: # Is not an integer
raise DatasetConfigInvalidError(err)
class DatasetConfigInvalidError(Exception):
"""
Raised in case the dataset config is invalid.
"""
pass
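# Illustrative dataset config (field names match the defaults handled above;
# the paths and numbers are hypothetical), saved as data/datasets/<name>.json:
#
#   {
#       "root_dir": "/data/my_dataset",
#       "load": true,
#       "il_n_processes": 4,
#       "il_n_feat_per_image": 50,
#       "index_n_submat": 32
#   }
#
# Optional entries that are omitted ("image_ordering", "il_features_path",
# "index_dir", ...) fall back to the DEFAULT_* values defined on
# DatasetConfigManager.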
|
the-stack_0_27017
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_no_prefix_suffix(apply_runner):
_, output = apply_runner(FIXTURES_DIR)
assert output['names']['project']['tf'] == 'cloud-dev-tf'
assert output['names']['bucket']['tf-org'] == 'cloud-dev-tf-org'
assert output['labels']['project']['tf'] == {
'environment': 'dev', 'scope': 'global', 'team': 'cloud'}
assert output['labels']['bucket']['tf-org'] == {
'environment': 'dev', 'team': 'cloud'}
def test_prefix(apply_runner):
_, output = apply_runner(FIXTURES_DIR, prefix='myco')
assert output['names']['project']['tf'] == 'myco-cloud-dev-tf'
assert output['names']['bucket']['tf-org'] == 'myco-cloud-dev-tf-org'
def test_suffix(apply_runner):
_, output = apply_runner(FIXTURES_DIR, suffix='myco')
assert output['names']['project']['tf'] == 'cloud-dev-tf-myco'
assert output['names']['bucket']['tf-org'] == 'cloud-dev-tf-org-myco'
def test_resource_prefix(apply_runner):
_, output = apply_runner(FIXTURES_DIR, prefix='myco',
use_resource_prefixes='true')
assert output['names']['project']['tf'] == 'project-myco-cloud-dev-tf'
assert output['names']['bucket']['tf-org'] == 'bucket-myco-cloud-dev-tf-org'
def test_separator(apply_runner):
_, output = apply_runner(
FIXTURES_DIR, separator_override='{ dataset = "_" }')
assert output['names']['dataset'] == {
'foobar': 'cloud_dev_foobar', 'frobniz': 'cloud_dev_frobniz'}
|
the-stack_0_27018
|
# Copyright (c) 2004,2018 Python-Metar Developers.
# Distributed under the terms of the BSD 2-Clause License.
# SPDX-License-Identifier: BSD-2-Clause
"""Python classes to represent dimensioned quantities used in weather reports.
"""
import re
from math import sin, cos, atan2, sqrt, pi
# exceptions
class UnitsError(Exception):
"""Exception raised when unrecognized units are used."""
pass
# regexp to match fractions (used by distance class)
# [Note: numerator of fraction must be single digit.]
FRACTION_RE = re.compile(r"^((?P<int>\d+)\s*)?(?P<num>\d)/(?P<den>\d+)$")
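# e.g. (illustrative) "1 1/2" -> int='1', num='1', den='2'; "3/4" -> num='3', den='4'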
# classes representing dimensioned values in METAR reports
class temperature(object):
"""A class representing a temperature value."""
legal_units = ["F", "C", "K"]
def __init__(self, value, units="C"):
if not units.upper() in temperature.legal_units:
raise UnitsError("unrecognized temperature unit: '" + units + "'")
self._units = units.upper()
try:
self._value = float(value)
except ValueError:
if value.startswith("M"):
self._value = -float(value[1:])
else:
raise ValueError("temperature must be integer: '" + str(value) + "'")
def __str__(self):
return self.string()
def value(self, units=None):
"""Return the temperature in the specified units."""
if units is None:
return self._value
else:
if not units.upper() in temperature.legal_units:
raise UnitsError("unrecognized temperature unit: '" + units + "'")
units = units.upper()
if self._units == "C":
celsius_value = self._value
elif self._units == "F":
celsius_value = (self._value - 32.0) / 1.8
elif self._units == "K":
celsius_value = self._value - 273.15
if units == "C":
return celsius_value
elif units == "K":
return 273.15 + celsius_value
elif units == "F":
return 32.0 + celsius_value * 1.8
def string(self, units=None):
"""Return a string representation of the temperature, using the given units."""
if units is None:
units = self._units
else:
if not units.upper() in temperature.legal_units:
raise UnitsError("unrecognized temperature unit: '" + units + "'")
units = units.upper()
val = self.value(units)
if units == "C":
return "%.1f C" % val
elif units == "F":
return "%.1f F" % val
elif units == "K":
return "%.1f K" % val
class pressure(object):
"""A class representing a barometric pressure value."""
legal_units = ["MB", "HPA", "IN"]
def __init__(self, value, units="MB"):
if not units.upper() in pressure.legal_units:
raise UnitsError("unrecognized pressure unit: '" + units + "'")
self._value = float(value)
self._units = units.upper()
def __str__(self):
return self.string()
def value(self, units=None):
"""Return the pressure in the specified units."""
if units is None:
return self._value
else:
if not units.upper() in pressure.legal_units:
raise UnitsError("unrecognized pressure unit: '" + units + "'")
units = units.upper()
if units == self._units:
return self._value
if self._units == "IN":
mb_value = self._value * 33.86398
else:
mb_value = self._value
if units == "MB" or units == "HPA":
return mb_value
elif units == "IN":
return mb_value / 33.86398
else:
raise UnitsError("unrecognized pressure unit: '" + units + "'")
def string(self, units=None):
"""Return a string representation of the pressure, using the given units."""
if not units:
units = self._units
else:
if not units.upper() in pressure.legal_units:
raise UnitsError("unrecognized pressure unit: '" + units + "'")
units = units.upper()
val = self.value(units)
if units == "MB":
return "%.1f mb" % val
elif units == "HPA":
return "%.1f hPa" % val
elif units == "IN":
return "%.2f inches" % val
class speed(object):
"""A class representing a wind speed value."""
legal_units = ["KT", "MPS", "KMH", "MPH"]
legal_gtlt = [">", "<"]
def __init__(self, value, units=None, gtlt=None):
if not units:
self._units = "MPS"
else:
if not units.upper() in speed.legal_units:
raise UnitsError("unrecognized speed unit: '" + units + "'")
self._units = units.upper()
if gtlt and not gtlt in speed.legal_gtlt:
raise ValueError(
"unrecognized greater-than/less-than symbol: '" + gtlt + "'"
)
self._gtlt = gtlt
self._value = float(value)
def __str__(self):
return self.string()
def value(self, units=None):
"""Return the speed in the specified units."""
if not units:
return self._value
else:
if not units.upper() in speed.legal_units:
raise UnitsError("unrecognized speed unit: '" + units + "'")
units = units.upper()
if units == self._units:
return self._value
if self._units == "KMH":
mps_value = self._value / 3.6
elif self._units == "KT":
mps_value = self._value * 0.514444
elif self._units == "MPH":
mps_value = self._value * 0.447000
else:
mps_value = self._value
if units == "KMH":
return mps_value * 3.6
elif units == "KT":
return mps_value / 0.514444
elif units == "MPH":
return mps_value / 0.447000
elif units == "MPS":
return mps_value
def string(self, units=None):
"""Return a string representation of the speed in the given units."""
if not units:
units = self._units
else:
if not units.upper() in speed.legal_units:
raise UnitsError("unrecognized speed unit: '" + units + "'")
units = units.upper()
val = self.value(units)
if units == "KMH":
text = "%.0f km/h" % val
elif units == "KT":
text = "%.0f knots" % val
elif units == "MPH":
text = "%.0f mph" % val
elif units == "MPS":
text = "%.0f mps" % val
if self._gtlt == ">":
text = "greater than " + text
elif self._gtlt == "<":
text = "less than " + text
return text
class distance(object):
"""A class representing a distance value."""
legal_units = ["SM", "MI", "M", "KM", "FT", "IN"]
legal_gtlt = [">", "<"]
def __init__(self, value, units=None, gtlt=None):
if not units:
self._units = "M"
else:
if not units.upper() in distance.legal_units:
raise UnitsError("unrecognized distance unit: '" + units + "'")
self._units = units.upper()
try:
if value.startswith("M"):
value = value[1:]
gtlt = "<"
elif value.startswith("P"):
value = value[1:]
gtlt = ">"
except:
pass
if gtlt and not gtlt in distance.legal_gtlt:
raise ValueError(
"unrecognized greater-than/less-than symbol: '" + gtlt + "'"
)
self._gtlt = gtlt
try:
self._value = float(value)
self._num = None
self._den = None
except ValueError:
mf = FRACTION_RE.match(value)
if not mf:
raise ValueError("distance is not parseable: '" + str(value) + "'")
df = mf.groupdict()
self._num = int(df["num"])
self._den = int(df["den"])
self._value = float(self._num) / float(self._den)
if df["int"]:
self._value += float(df["int"])
def __str__(self):
return self.string()
def value(self, units=None):
"""Return the distance in the specified units."""
if not units:
return self._value
else:
if not units.upper() in distance.legal_units:
raise UnitsError("unrecognized distance unit: '" + units + "'")
units = units.upper()
if units == self._units:
return self._value
if self._units == "SM" or self._units == "MI":
m_value = self._value * 1609.344
elif self._units == "FT":
m_value = self._value / 3.28084
elif self._units == "IN":
m_value = self._value / 39.3701
elif self._units == "KM":
m_value = self._value * 1000
else:
m_value = self._value
if units == "SM" or units == "MI":
return m_value / 1609.344
elif units == "FT":
return m_value * 3.28084
elif units == "IN":
return m_value * 39.3701
elif units == "KM":
return m_value / 1000
elif units == "M":
return m_value
def string(self, units=None):
"""Return a string representation of the distance in the given units."""
if not units:
units = self._units
else:
if not units.upper() in distance.legal_units:
raise UnitsError("unrecognized distance unit: '" + units + "'")
units = units.upper()
if self._num and self._den and units == self._units:
val = int(self._value - self._num / self._den)
if val:
text = "%d %d/%d" % (val, self._num, self._den)
else:
text = "%d/%d" % (self._num, self._den)
else:
if units == "KM":
text = "%.1f" % self.value(units)
else:
text = "%.0f" % self.value(units)
if units == "SM" or units == "MI":
text += " miles"
elif units == "M":
text += " meters"
elif units == "KM":
text += " km"
elif units == "FT":
text += " feet"
elif units == "IN":
text += " inches"
if self._gtlt == ">":
text = "greater than " + text
elif self._gtlt == "<":
text = "less than " + text
return text
class direction(object):
"""A class representing a compass direction."""
compass_dirs = {
"N": 0.0,
"NNE": 22.5,
"NE": 45.0,
"ENE": 67.5,
"E": 90.0,
"ESE": 112.5,
"SE": 135.0,
"SSE": 157.5,
"S": 180.0,
"SSW": 202.5,
"SW": 225.0,
"WSW": 247.5,
"W": 270.0,
"WNW": 292.5,
"NW": 315.0,
"NNW": 337.5,
}
def __init__(self, d):
if d in direction.compass_dirs:
self._compass = d
self._degrees = direction.compass_dirs[d]
else:
self._compass = None
value = float(d)
if value < 0.0 or value > 360.0:
raise ValueError("direction must be 0..360: '" + str(value) + "'")
self._degrees = value
def __str__(self):
return self.string()
def value(self):
"""Return the numerical direction, in degrees."""
return self._degrees
def string(self):
"""Return a string representation of the numerical direction."""
return "%.0f degrees" % self._degrees
def compass(self):
"""Return the compass direction, e.g., "N", "ESE", etc.)."""
if not self._compass:
degrees = 22.5 * round(self._degrees / 22.5)
if degrees == 360.0:
self._compass = "N"
else:
for name, d in direction.compass_dirs.items():
if d == degrees:
self._compass = name
break
return self._compass
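# Example (illustrative): direction("30").compass() returns "NNE", since 30 degrees
# rounds to the nearest 22.5-degree sector (22.5 degrees).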
class precipitation(object):
"""A class representing a precipitation value."""
legal_units = ["IN", "CM"]
legal_gtlt = [">", "<"]
def __init__(self, value, units=None, gtlt=None):
if not units:
self._units = "IN"
else:
if not units.upper() in precipitation.legal_units:
raise UnitsError("unrecognized precipitation unit: '" + units + "'")
self._units = units.upper()
try:
if value.startswith("M"):
value = value[1:]
gtlt = "<"
elif value.startswith("P"):
value = value[1:]
gtlt = ">"
except:
pass
if gtlt and not gtlt in precipitation.legal_gtlt:
raise ValueError(
"unrecognized greater-than/less-than symbol: '" + gtlt + "'"
)
self._gtlt = gtlt
self._value = float(value)
# In METAR world, a string of just four or three zeros denotes trace
self._istrace = value in ["0000", "000"]
def __str__(self):
return self.string()
def value(self, units=None):
"""Return the precipitation in the specified units."""
if not units:
return self._value
else:
if not units.upper() in precipitation.legal_units:
raise UnitsError("unrecognized precipitation unit: '" + units + "'")
units = units.upper()
if units == self._units:
return self._value
if self._units == "CM":
i_value = self._value / 2.54 # stored in cm; convert to inches
else:
i_value = self._value
if units == "CM":
return i_value * 2.54
else:
return i_value
def string(self, units=None):
"""Return a string representation of the precipitation in the given units."""
if not units:
units = self._units
else:
if not units.upper() in precipitation.legal_units:
raise UnitsError("unrecognized precipitation unit: '" + units + "'")
units = units.upper()
# A trace is a trace in any units
if self._istrace:
return "Trace"
text = "%.2f" % self.value(units)
if units == "CM":
text += "cm"
else:
text += "in"
if self._gtlt == ">":
text = "greater than " + text
elif self._gtlt == "<":
text = "less than " + text
return text
def istrace(self):
"""Return a boolean on if this precipitation was a trace"""
return self._istrace
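# Illustrative sketch of the trace handling and unit suffixes above:
#
#   precipitation("0000", "IN").string()   # -> "Trace"
#   precipitation("0.25", "IN").string()   # -> "0.25in"
#   precipitation("0.25", "IN").istrace()  # -> False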
class position(object):
"""A class representing a location on the earth's surface."""
def __init__(self, latitude=None, longitude=None):
self.latitude = latitude
self.longitude = longitude
def __str__(self):
return self.string()
def getdistance(self, position2):
"""
Calculate the great-circle distance to another location using the Haversine
formula. See <http://www.movable-type.co.uk/scripts/LatLong.html>
and <http://mathforum.org/library/drmath/sets/select/dm_lat_long.html>
"""
        earth_radius = 6371000.0  # mean Earth radius in metres
lat1 = self.latitude
long1 = self.longitude
lat2 = position2.latitude
long2 = position2.longitude
        a = sin(0.5 * (lat2 - lat1)) ** 2 + cos(lat1) * cos(lat2) * sin(
            0.5 * (long2 - long1)
        ) ** 2
        c = 2.0 * atan2(sqrt(a), sqrt(1.0 - a))
d = distance(earth_radius * c, "M")
return d
def getdirection(self, position2):
"""
Calculate the initial direction to another location. (The direction
typically changes as you trace the great circle path to that location.)
See <http://www.movable-type.co.uk/scripts/LatLong.html>.
"""
lat1 = self.latitude
long1 = self.longitude
lat2 = position2.latitude
long2 = position2.longitude
s = -sin(long1 - long2) * cos(lat2)
c = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(long1 - long2)
d = atan2(s, c) * 180.0 / math.pi
if d < 0.0:
d += 360.0
return direction(d)
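# Illustrative sketch (assumes latitude/longitude are stored in radians, which
# is what the trig calls in getdistance/getdirection require, and that the
# unseen position/distance constructors accept plain floats):
#
#   from math import radians
#   london = position(radians(51.5), radians(-0.12))
#   paris = position(radians(48.85), radians(2.35))
#   london.getdistance(paris).string("KM")   # roughly "344 km"
#   london.getdirection(paris).compass()     # -> "SSE" (initial bearing of about 148 degrees)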
|
the-stack_0_27022
|
"""Three-dimensional mobjects."""
__all__ = [
"ThreeDVMobject",
"ParametricSurface",
"Sphere",
"Dot3D",
"Cube",
"Prism",
"Cone",
"Arrow3D",
"Cylinder",
"Line3D",
"Torus",
]
import numpy as np
from ..constants import *
from ..mobject.geometry import Circle, Square
from ..mobject.mobject import *
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.color import *
from ..utils.iterables import tuplify
from ..utils.space_ops import normalize, z_to_vector
class ThreeDVMobject(VMobject):
def __init__(self, shade_in_3d=True, **kwargs):
super().__init__(shade_in_3d=shade_in_3d, **kwargs)
class ParametricSurface(VGroup):
def __init__(
self,
func,
u_min=0,
u_max=1,
v_min=0,
v_max=1,
resolution=32,
surface_piece_config={},
fill_color=BLUE_D,
fill_opacity=1.0,
checkerboard_colors=[BLUE_D, BLUE_E],
stroke_color=LIGHT_GREY,
stroke_width=0.5,
should_make_jagged=False,
pre_function_handle_to_anchor_scale_factor=0.00001,
**kwargs
):
VGroup.__init__(self, **kwargs)
self.u_min = u_min
self.u_max = u_max
self.v_min = v_min
self.v_max = v_max
self.resolution = resolution
self.surface_piece_config = surface_piece_config
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self.checkerboard_colors = checkerboard_colors
self.stroke_color = stroke_color
self.stroke_width = stroke_width
self.should_make_jagged = should_make_jagged
self.pre_function_handle_to_anchor_scale_factor = (
pre_function_handle_to_anchor_scale_factor
)
self.func = func
self.setup_in_uv_space()
self.apply_function(lambda p: func(p[0], p[1]))
if self.should_make_jagged:
self.make_jagged()
def get_u_values_and_v_values(self):
res = tuplify(self.resolution)
if len(res) == 1:
u_res = v_res = res[0]
else:
u_res, v_res = res
u_min = self.u_min
u_max = self.u_max
v_min = self.v_min
v_max = self.v_max
u_values = np.linspace(u_min, u_max, u_res + 1)
v_values = np.linspace(v_min, v_max, v_res + 1)
return u_values, v_values
def setup_in_uv_space(self):
u_values, v_values = self.get_u_values_and_v_values()
faces = VGroup()
for i in range(len(u_values) - 1):
for j in range(len(v_values) - 1):
u1, u2 = u_values[i : i + 2]
v1, v2 = v_values[j : j + 2]
face = ThreeDVMobject()
face.set_points_as_corners(
[
[u1, v1, 0],
[u2, v1, 0],
[u2, v2, 0],
[u1, v2, 0],
[u1, v1, 0],
]
)
faces.add(face)
face.u_index = i
face.v_index = j
face.u1 = u1
face.u2 = u2
face.v1 = v1
face.v2 = v2
faces.set_fill(color=self.fill_color, opacity=self.fill_opacity)
faces.set_stroke(
color=self.stroke_color,
width=self.stroke_width,
opacity=self.stroke_opacity,
)
self.add(*faces)
if self.checkerboard_colors:
self.set_fill_by_checkerboard(*self.checkerboard_colors)
def set_fill_by_checkerboard(self, *colors, opacity=None):
n_colors = len(colors)
for face in self:
c_index = (face.u_index + face.v_index) % n_colors
face.set_fill(colors[c_index], opacity=opacity)
return self
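# Illustrative sketch (comment only): a saddle surface built directly from
# ParametricSurface inside a hypothetical ThreeDScene.construct():
#
#   saddle = ParametricSurface(
#       lambda u, v: np.array([u, v, u * v]),
#       u_min=-1, u_max=1, v_min=-1, v_max=1, resolution=16,
#   )
#   self.add(saddle)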
# Specific shapes
class Sphere(ParametricSurface):
def __init__(
self,
radius=1,
resolution=(12, 24),
u_min=0.001,
u_max=PI - 0.001,
v_min=0,
v_max=TAU,
**kwargs
):
ParametricSurface.__init__(
self,
self.func,
resolution=resolution,
u_min=u_min,
u_max=u_max,
v_min=v_min,
v_max=v_max,
**kwargs,
)
self.radius = radius
self.scale(self.radius)
def func(
self, u, v
): # FIXME: An attribute defined in manim.mobject.three_dimensions line 56 hides this method
return np.array([np.cos(v) * np.sin(u), np.sin(v) * np.sin(u), np.cos(u)])
class Dot3D(Sphere):
"""A spherical dot.
Parameters
--------
radius : :class:`float`, optional
The radius of the dot.
color : :class:`~.Colors`, optional
The color of the :class:`Dot3D`
Examples
--------
.. manim:: Dot3DExample
:save_last_frame:
class Dot3DExample(ThreeDScene):
def construct(self):
self.set_camera_orientation(phi=75*DEGREES, theta=-45*DEGREES)
axes = ThreeDAxes()
dot_1 = Dot3D(color=RED).move_to(axes.coords_to_point(0, 0, 1))
dot_2 = Dot3D(radius=0.1, color=BLUE).move_to(axes.coords_to_point(2, 0, 0))
self.add(axes, dot_1, dot_2)
"""
def __init__(self, radius=DEFAULT_DOT_RADIUS, color=WHITE, **kwargs):
Sphere.__init__(self, radius=radius, **kwargs)
self.set_color(color)
class Cube(VGroup):
def __init__(
self,
side_length=2,
fill_opacity=0.75,
fill_color=BLUE,
stroke_width=0,
**kwargs
):
self.side_length = side_length
super().__init__(
fill_color=fill_color,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
**kwargs,
)
def generate_points(self):
for vect in IN, OUT, LEFT, RIGHT, UP, DOWN:
face = Square(
side_length=self.side_length,
shade_in_3d=True,
)
face.flip()
face.shift(self.side_length * OUT / 2.0)
face.apply_matrix(z_to_vector(vect))
self.add(face)
class Prism(Cube):
def __init__(self, dimensions=[3, 2, 1], **kwargs):
self.dimensions = dimensions
Cube.__init__(self, **kwargs)
def generate_points(self):
Cube.generate_points(self)
for dim, value in enumerate(self.dimensions):
self.rescale_to_fit(value, dim, stretch=True)
class Cone(ParametricSurface):
"""A circular cone.
Can be defined using 2 parameters: its height, and its base radius.
    The polar angle, theta, can be calculated using arctan(base_radius /
    height). The spherical radius, r, is calculated using the Pythagorean
    theorem.
Examples
--------
.. manim:: ExampleCone
:save_last_frame:
class ExampleCone(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
cone = Cone(direction=X_AXIS+Y_AXIS+2*Z_AXIS)
self.set_camera_orientation(phi=5*PI/11, theta=PI/9)
self.add(axes, cone)
Parameters
--------
base_radius : :class:`float`
The base radius from which the cone tapers.
height : :class:`float`
The height measured from the plane formed by the base_radius to the apex of the cone.
direction : :class:`numpy.array`
The direction of the apex.
show_base : :class:`bool`
Whether to show the base plane or not.
v_min : :class:`float`
The azimuthal angle to start at.
v_max : :class:`float`
The azimuthal angle to end at.
u_min : :class:`float`
The radius at the apex.
checkerboard_colors : :class:`bool`
Show checkerboard grid texture on the cone.
"""
def __init__(
self,
base_radius=1,
height=1,
direction=Z_AXIS,
show_base=False,
v_min=0,
v_max=TAU,
u_min=0,
checkerboard_colors=False,
**kwargs
):
self.direction = direction
self.theta = PI - np.arctan(base_radius / height)
ParametricSurface.__init__(
self,
self.func,
v_min=v_min,
v_max=v_max,
u_min=u_min,
u_max=np.sqrt(base_radius ** 2 + height ** 2),
checkerboard_colors=checkerboard_colors,
**kwargs,
)
# used for rotations
self._current_theta = 0
self._current_phi = 0
if show_base:
self.base_circle = Circle(
radius=base_radius,
color=self.fill_color,
fill_opacity=self.fill_opacity,
stroke_width=0,
)
self.base_circle.shift(height * IN)
self.add(self.base_circle)
self._rotate_to_direction()
def func(self, u, v):
"""Converts from spherical coordinates to cartesian.
Parameters
---------
u : :class:`float`
The radius.
v : :class:`float`
The azimuthal angle.
"""
r = u
phi = v
return np.array(
[
r * np.sin(self.theta) * np.cos(phi),
r * np.sin(self.theta) * np.sin(phi),
r * np.cos(self.theta),
]
)
def _rotate_to_direction(self):
x, y, z = self.direction
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
theta = np.arccos(z / r)
if x == 0:
if y == 0: # along the z axis
phi = 0
else:
phi = np.arctan(np.inf)
if y < 0:
phi += PI
else:
phi = np.arctan(y / x)
if x < 0:
phi += PI
# Undo old rotation (in reverse order)
self.rotate(-self._current_phi, Z_AXIS, about_point=ORIGIN)
self.rotate(-self._current_theta, Y_AXIS, about_point=ORIGIN)
# Do new rotation
self.rotate(theta, Y_AXIS, about_point=ORIGIN)
self.rotate(phi, Z_AXIS, about_point=ORIGIN)
# Store values
self._current_theta = theta
self._current_phi = phi
def set_direction(self, direction):
self.direction = direction
self._rotate_to_direction()
def get_direction(self):
return self.direction
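# Worked example (comment only): for the defaults base_radius=1, height=1 the
# constructor above gives theta = PI - arctan(1) = 3*PI/4 and a slant height
# u_max = sqrt(1**2 + 1**2) = sqrt(2), so u runs from the apex at 0 out to the
# base rim at sqrt(2).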
class Cylinder(ParametricSurface):
"""A cylinder, defined by its height, radius and direction,
Examples
---------
.. manim:: ExampleCylinder
:save_last_frame:
class ExampleCylinder(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
cylinder = Cylinder(radius=2, height=3)
self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
self.add(axes, cylinder)
Parameters
---------
radius : :class:`float`
The radius of the cylinder.
height : :class:`float`
The height of the cylinder.
direction : :class:`numpy.array`
The direction of the central axis of the cylinder.
v_min : :class:`float`
The height along the height axis (given by direction) to start on.
v_max : :class:`float`
The height along the height axis (given by direction) to end on.
show_ends : :class:`bool`
Whether to show the end caps or not.
"""
def __init__(
self,
radius=1,
height=2,
direction=Z_AXIS,
v_min=0,
v_max=TAU,
show_ends=True,
resolution=24,
**kwargs
):
self._height = height
self.radius = radius
ParametricSurface.__init__(
self,
self.func,
resolution=resolution,
u_min=-self._height / 2,
u_max=self._height / 2,
v_min=v_min,
v_max=v_max,
**kwargs,
)
if show_ends:
self.add_bases()
self._current_phi = 0
self._current_theta = 0
self.set_direction(direction)
def func(self, u, v):
"""Converts from cylindrical coordinates to cartesian.
Parameters
---------
u : :class:`float`
The height.
v : :class:`float`
The azimuthal angle.
"""
height = u
phi = v
r = self.radius
return np.array([r * np.cos(phi), r * np.sin(phi), height])
def add_bases(self):
"""Adds the end caps of the cylinder."""
self.base_top = Circle(
radius=self.radius,
color=self.fill_color,
fill_opacity=self.fill_opacity,
shade_in_3d=True,
stroke_width=0,
)
self.base_top.shift(self.u_max * IN)
self.base_bottom = Circle(
radius=self.radius,
color=self.fill_color,
fill_opacity=self.fill_opacity,
shade_in_3d=True,
stroke_width=0,
)
self.base_bottom.shift(self.u_min * IN)
self.add(self.base_top, self.base_bottom)
def _rotate_to_direction(self):
x, y, z = self.direction
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
theta = np.arccos(z / r)
if x == 0:
if y == 0: # along the z axis
phi = 0
            else:  # along the y axis
phi = np.arctan(np.inf)
if y < 0:
phi += PI
else:
phi = np.arctan(y / x)
if x < 0:
phi += PI
# undo old rotation (in reverse direction)
self.rotate(-self._current_phi, Z_AXIS, about_point=ORIGIN)
self.rotate(-self._current_theta, Y_AXIS, about_point=ORIGIN)
# do new rotation
self.rotate(theta, Y_AXIS, about_point=ORIGIN)
self.rotate(phi, Z_AXIS, about_point=ORIGIN)
# store new values
self._current_theta = theta
self._current_phi = phi
def set_direction(self, direction):
# if get_norm(direction) is get_norm(self.direction):
# pass
self.direction = direction
self._rotate_to_direction()
def get_direction(self):
return self.direction
class Line3D(Cylinder):
"""A cylindrical line, for use in ThreeDScene.
Examples
---------
.. manim:: ExampleLine3D
:save_last_frame:
class ExampleLine3D(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
line = Line3D(start=np.array([0, 0, 0]), end=np.array([2, 2, 2]))
self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
self.add(axes, line)
Parameters
---------
start : :class:`numpy.array`
The start position of the line.
end : :class:`numpy.array`
The end position of the line.
thickness : :class:`float`
The thickness of the line.
"""
def __init__(self, start=LEFT, end=RIGHT, thickness=0.02, color=None, **kwargs):
self.thickness = thickness
self.set_start_and_end_attrs(start, end, **kwargs)
if color is not None:
self.set_color(color)
def set_start_and_end_attrs(self, start, end, **kwargs):
"""Sets the start and end points of the line.
If either ``start`` or ``end`` are :class:`~.Mobject`s, this gives their centers.
"""
rough_start = self.pointify(start)
rough_end = self.pointify(end)
self.vect = rough_end - rough_start
self.length = np.linalg.norm(self.vect)
self.direction = normalize(self.vect)
        # Now that we know the direction between them, we can find
        # the appropriate boundary point from start and end,
        # if they're mobjects
self.start = self.pointify(start, self.direction)
self.end = self.pointify(end, -self.direction)
Cylinder.__init__(
self,
height=np.linalg.norm(self.vect),
radius=self.thickness,
direction=self.direction,
**kwargs,
)
self.shift((self.start + self.end) / 2)
def pointify(self, mob_or_point, direction=None):
if isinstance(mob_or_point, Mobject):
mob = mob_or_point
if direction is None:
return mob.get_center()
else:
return mob.get_boundary_point(direction)
return np.array(mob_or_point)
def get_start(self):
return self.start
def get_end(self):
return self.end
class Arrow3D(Line3D):
"""An arrow made out of a cylindrical line and a conical tip.
Examples
---------
.. manim:: ExampleArrow3D
:save_last_frame:
class ExampleArrow3D(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
arrow = Arrow3D(start=np.array([0, 0, 0]), end=np.array([2, 2, 2]))
self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
self.add(axes, arrow)
Parameters
---------
start : :class:`numpy.array`
The start position of the arrow.
end : :class:`numpy.array`
The end position of the arrow.
thickness : :class:`float`
The thickness of the arrow.
height : :class:`float`
The height of the conical tip.
base_radius: :class:`float`
The base radius of the conical tip.
"""
def __init__(
self,
start=LEFT,
end=RIGHT,
thickness=0.02,
height=0.5,
base_radius=0.25,
color=WHITE,
**kwargs
):
Line3D.__init__(self, start=start, end=end, **kwargs)
self.length = np.linalg.norm(self.vect)
self.set_start_and_end_attrs(
self.start,
self.end - height * self.direction,
thickness=thickness,
**kwargs,
)
self.cone = Cone(
direction=self.direction, base_radius=base_radius, height=height, **kwargs
)
self.cone.shift(end)
self.add(self.cone)
self.set_color(color)
class Torus(ParametricSurface):
"""A torus.
Examples
---------
.. manim :: ExampleTorus
:save_last_frame:
class ExampleTorus(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
torus = Torus()
self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
self.add(axes, torus)
Parameters
---------
major_radius : :class:`float`
Distance from the center of the tube to the center of the torus.
minor_radius : :class:`float`
Radius of the tube.
"""
def __init__(
self,
major_radius=3,
minor_radius=1,
u_min=0,
u_max=TAU,
v_min=0,
v_max=TAU,
resolution=24,
**kwargs
):
self.R = major_radius
self.r = minor_radius
ParametricSurface.__init__(
self,
self.func,
u_min=u_min,
u_max=u_max,
v_min=v_min,
v_max=v_max,
resolution=resolution,
**kwargs,
)
def func(self, u, v):
P = np.array([np.cos(u), np.sin(u), 0])
return (self.R - self.r * np.cos(v)) * P - self.r * np.sin(v) * OUT
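# Worked example (comment only): with the defaults R=3, r=1, func(0, 0) above
# evaluates to (3 - 1*cos(0)) * [1, 0, 0] - 1*sin(0) * OUT = [2, 0, 0], the
# innermost point of the tube on the positive x-axis.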
|
the-stack_0_27023
|
"""Submodule to handle web requests in opsdroid."""
import json
import logging
import ssl
from aiohttp import web
from opsdroid import __version__
_LOGGER = logging.getLogger(__name__)
class Web:
"""Create class for opsdroid Web server."""
def __init__(self, opsdroid):
"""Create web object."""
self.opsdroid = opsdroid
try:
self.config = self.opsdroid.config["web"]
except KeyError:
self.config = {}
self.web_app = web.Application()
self.runner = web.AppRunner(self.web_app)
self.site = None
self.web_app.router.add_get('/', self.web_index_handler)
self.web_app.router.add_get('', self.web_index_handler)
self.web_app.router.add_get('/stats', self.web_stats_handler)
self.web_app.router.add_get('/stats/', self.web_stats_handler)
@property
def get_port(self):
"""Return port from config or the default.
Args:
self: instance method
Returns:
int: returns value of port being used, config or default
"""
try:
port = self.config["port"]
except KeyError:
if self.get_ssl_context is not None:
port = 8443
else:
port = 8080
return port
@property
def get_host(self):
"""Return host from config or the default.
Args:
self: instance method
Returns:
string: returns address of host being used, config or default
"""
try:
host = self.config["host"]
except KeyError:
host = '127.0.0.1'
return host
@property
def get_ssl_context(self):
"""Return the ssl context or None.
Args:
self: instance method
Returns:
string (or NoneType): returns ssl context of None.
"""
try:
ssl_config = self.config["ssl"]
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
sslcontext.load_cert_chain(ssl_config["cert"], ssl_config["key"])
return sslcontext
except FileNotFoundError:
_LOGGER.error(_("Cannot find ssl cert or key."))
return None
except KeyError:
return None
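    # Illustrative configuration sketch (YAML, as commonly used for opsdroid's
    # config file); the keys mirror the lookups in the properties above, and
    # the file paths are hypothetical:
    #
    #   web:
    #     host: 0.0.0.0
    #     port: 8443
    #     ssl:
    #       cert: /path/to/cert.pem
    #       key: /path/to/key.pem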
async def start(self):
"""Start web servers."""
_LOGGER.info(_("Started web server on %s://%s%s"),
"http" if self.get_ssl_context is None else "https",
self.get_host,
":{}".format(self.get_port)
if self.get_port not in (80, 443) else "")
await self.runner.setup()
self.site = web.TCPSite(self.runner,
host=self.get_host,
port=self.get_port,
ssl_context=self.get_ssl_context)
await self.site.start()
async def stop(self):
"""Stop the web server."""
await self.runner.cleanup()
@staticmethod
def build_response(status, result):
"""Build a json response object to power the bot reponses.
Args:
result: serialize obj as a JSON formated stream
Returns:
json: returns json object with list of responses for the bot
"""
return web.Response(text=json.dumps(result), status=status)
def register_skill(self, opsdroid, skill, webhook):
"""Register a new skill in the web app router."""
async def wrapper(req, opsdroid=opsdroid, config=skill.config):
"""Wrap up the aiohttp handler."""
_LOGGER.info(_("Running skill %s via webhook"), webhook)
opsdroid.stats["webhooks_called"] = \
opsdroid.stats["webhooks_called"] + 1
resp = await skill(opsdroid, config, req)
if isinstance(resp, web.Response):
return resp
return Web.build_response(200, {"called_skill": webhook})
self.web_app.router.add_post(
"/skill/{}/{}".format(skill.config["name"], webhook), wrapper)
self.web_app.router.add_post(
"/skill/{}/{}/".format(skill.config["name"], webhook), wrapper)
def setup_webhooks(self, skills):
"""Add the webhooks for the webhook skills to the router."""
for skill in skills:
for matcher in skill.matchers:
if "webhook" in matcher:
self.register_skill(
self.opsdroid, skill, matcher["webhook"]
)
async def web_index_handler(self, request):
"""Handle root web request to opsdroid API.
Args:
request: web request to the root (index)
Returns:
dict: returns successful status code and greeting for the root page
"""
return self.build_response(200, {
"message": "Welcome to the opsdroid API"})
async def web_stats_handler(self, request):
"""Handle stats request.
Args:
request: web request to render opsdroid stats
Returns:
dict: returns successful status code and dictionary with
stats requested
"""
stats = self.opsdroid.stats
try:
stats["average_response_time"] = \
stats["total_response_time"] / stats["total_responses"]
except ZeroDivisionError:
stats["average_response_time"] = 0
return self.build_response(200, {
"version": __version__,
"messages": {
"total_parsed": stats["messages_parsed"],
"webhooks_called": stats["webhooks_called"],
"total_response_time": stats["total_response_time"],
"total_responses": stats["total_responses"],
"average_response_time": stats["average_response_time"]
},
"modules": {
"skills": len(self.opsdroid.skills),
"connectors": len(self.opsdroid.connectors),
"databases": len(self.opsdroid.memory.databases)
}
})
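# Illustrative usage sketch (assumes a configured opsdroid instance named
# `opsdroid` and a running asyncio event loop):
#
#   web = Web(opsdroid)
#   web.setup_webhooks(opsdroid.skills)
#   await web.start()   # serves on http://127.0.0.1:8080 unless configured otherwise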
|
the-stack_0_27025
|
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
import json
from django.contrib.auth.decorators import login_required
from django.contrib.gis.geos import GEOSGeometry
from django.core import serializers
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic import ListView, View, DeleteView
from django.views.decorators.http import require_http_methods
from forms import MapForm, MapInlineFormset, UploadKMZForm, UploadJSONForm
from sodo.core.models import AOI
from sodo.locations.models import Counties
from models import Feature, FeatureType, Map, Layer, MapLayerUserRememberedParams, MapLayer, GeoeventsSource
from kmz_handler import save_kmz_file
from json import load
import logging
logger = logging.getLogger(__name__)
class CreateFeatures(View):
"""
Reads GeoJSON from post request and creates AOIS for each features.
"""
http_method_names = ['post']
def post(self, request, *args, **kwargs):
feature = None
tpi = request.META.get('HTTP_TEMP_POINT_ID', "none")
aoi = request.POST.get('aoi')
geometry = request.POST.get('geometry')
geojson = json.loads(geometry)
properties = geojson.get('properties')
aoi = AOI.objects.get(id=aoi)
job = getattr(aoi, 'job')
project = getattr(job, 'project')
template = properties.get('template') if properties else None
# TODO: handle exceptions
if template:
template = FeatureType.objects.get(id=template)
attrs = dict(aoi=aoi,
job=job,
project=project,
analyst=request.user,
template=template)
geometry = geojson.get('geometry')
geom_obj = GEOSGeometry(json.dumps(geometry))
attrs['the_geom'] = geom_obj
county_list = Counties.objects.filter(poly__contains=geom_obj.centroid.wkt)
county = None
if len(county_list):
county = str(county_list[0].name)
try:
feature = Feature(**attrs)
feature.full_clean()
if not feature.properties:
feature.properties = {}
if county:
feature.properties['county'] = county
feature.save()
except ValidationError as e:
response = HttpResponse(content=json.dumps(dict(errors=e.messages)), mimetype="application/json", status=400)
response['Temp-Point-Id'] = tpi
return response
# This feels a bit ugly but it does get the GeoJSON into the response
feature_json = serializers.serialize('json', [feature,])
feature_list = json.loads(feature_json)
feature_list[0]['geojson'] = feature.geoJSON(True)
response = HttpResponse(json.dumps(feature_list), mimetype="application/json")
response['Temp-Point-Id'] = tpi
return response
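# Illustrative sketch of the POST body CreateFeatures expects (field names are
# taken from the handler above; the id values are hypothetical):
#
#   aoi=42
#   geometry={"type": "Feature",
#             "properties": {"template": 7},
#             "geometry": {"type": "Point", "coordinates": [-77.05, 38.89]}}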
class EditFeatures(View):
"""
Reads feature info from post request and updates associated feature object.
"""
http_method_names = ['post']
def post(self, request, *args, **kwargs):
geometry = request.POST.get('geometry')
geojson = json.loads(geometry)
properties = geojson.get('properties')
try:
feature = Feature.objects.get(pk=properties.get('id'))
except ObjectDoesNotExist:
raise Http404
geometry = geojson.get('geometry')
feature.the_geom = GEOSGeometry(json.dumps(geometry))
template = properties.get('template') if properties else None
# TODO: handle exceptions
if template:
feature.template = FeatureType.objects.get(id=template)
try:
feature.full_clean()
feature.save()
except ValidationError as e:
return HttpResponse(content=json.dumps(dict(errors=e.messages)), mimetype="application/json", status=400)
return HttpResponse("{}", mimetype="application/json")
@login_required
@require_http_methods(["POST"])
def update_user_maplayer_param(request, *args, **kwargs):
user = request.user
try:
json_stuff = json.loads(request.body)
except ValueError:
return HttpResponse("{\"status\":\"Bad Request\"}", mimetype="application/json", status=400)
mlq = MapLayer.objects.filter(id=json_stuff['maplayer'])
if not mlq.exists():
return HttpResponse("{\"status:\":\"Bad Request\", \"reason\":\"MapLayer does not exist\"}", status=400)
else:
ml = mlq.get()
mlurpq = MapLayerUserRememberedParams.objects.filter(maplayer=ml, user=user)
if mlurpq.exists():
mlurp = mlurpq.get()
else:
mlurp = MapLayerUserRememberedParams(maplayer=ml, user=user, values={})
mlurp.values[json_stuff['param']] = json_stuff['newValue']
mlurp.save()
return HttpResponse(json.dumps(mlurp.values), mimetype="application/json", status=200)
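# Illustrative request body for update_user_maplayer_param (keys match the
# json lookups above; the values are hypothetical):
#
#   {"maplayer": 3, "param": "opacity", "newValue": 0.5}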
def feature_delete(request, pk):
try:
feature = Feature.objects.get(pk=pk)
feature.delete()
except ObjectDoesNotExist:
raise Http404
return HttpResponse( content=pk, status=200 )
@login_required
def create_update_map(request, job_id, map_id):
if map_id:
map_obj = Map.objects.get(pk=map_id)
else:
map_obj = None
if request.method == 'POST':
form = MapForm(request.POST, prefix='map', instance=map_obj)
maplayers_formset = MapInlineFormset(request.POST, prefix='layers', instance=map_obj)
if form.is_valid() and maplayers_formset.is_valid():
form.save()
maplayers_formset.instance = form.instance
maplayers_formset.save()
return HttpResponseRedirect(reverse('job-detail', kwargs = {'pk': job_id}))
else:
form = MapForm(prefix='map', instance=map_obj)
maplayers_formset = MapInlineFormset(prefix='layers', instance=map_obj)
# form = [f for f in form if f.name not in ['zoom', 'projection', 'center_x', 'center_y']]
return render_to_response('core/generic_form.html', {
'form': form,
'layer_formset': maplayers_formset,
'custom_form': 'core/map_create.html',
'object': map_obj,
}, context_instance=RequestContext(request))
class MapListView(ListView):
model = Map
def get_context_data(self, **kwargs):
context = super(MapListView, self).get_context_data(**kwargs)
context['admin'] = self.request.user.has_perm('maps.add_map')
return context
class MapDelete(DeleteView):
model = Map
template_name = "core/generic_confirm_delete.html"
def get_success_url(self):
return reverse('map-list')
class FeatureTypeListView(ListView):
model = FeatureType
def get_context_data(self, **kwargs):
context = super(FeatureTypeListView, self).get_context_data(**kwargs)
context['admin'] = self.request.user.has_perm('maps.add_featuretype')
return context
class FeatureTypeDelete(DeleteView):
model = FeatureType
template_name = "core/generic_confirm_delete.html"
def get_success_url(self):
        # TODO: Add a signal to context to
        # tell the user that it was successful.
return reverse('feature-type-list')
class LayerListView(ListView):
model = Layer
def get_context_data(self, **kwargs):
context = super(LayerListView, self).get_context_data(**kwargs)
context['admin'] = self.request.user.has_perm('maps.add_layer')
return context
class LayerImport(ListView):
model = Layer
template_name = "maps/layer_import.html"
def get_context_data(self, **kwargs):
context = super(LayerImport, self).get_context_data(**kwargs)
context['geoevents_sources'] = GeoeventsSource.objects.all()
return context
def post(self, request, *args, **kwargs):
layers = request.POST.getlist('layer')
for lay in layers:
layer = json.loads(lay)
# see if it's already in here. assume 'url' and 'layer' attributes make it unique
l = Layer.objects.filter(url=layer['url'], layer=layer['layer'])
if not l:
# add the layer
new_layer = Layer()
for key, value in layer.iteritems():
# if key == 'layer_params':
# # TODO: need to pass json object here
# pass
# else:
setattr(new_layer, key, value)
new_layer.save()
return HttpResponseRedirect(reverse('layer-list'))
class LayerDelete(DeleteView):
model = Layer
template_name = "core/generic_confirm_delete.html"
def get_success_url(self):
return reverse('layer-list')
class KMZLayerImport(ListView):
model = Layer
template_name = "maps/kmz_upload.html"
def get_context_data(self, **kwargs):
context = super(KMZLayerImport, self).get_context_data(**kwargs)
return context
def post(self, request, *args, **kwargs):
form = UploadKMZForm(request.POST, request.FILES)
if form.is_valid():
localdir = save_kmz_file(request.FILES['kmzfile'])
uri = request.build_absolute_uri(localdir)
            if localdir is not None:
                layer = Layer.objects.create(name=request.POST['title'], type="KML", url=uri, layer="", styles="", description="")
return HttpResponseRedirect(reverse('layer-list'))
class JSONLayerImport(ListView):
model = Layer
template_name = "maps/json_upload.html"
def get_context_data(self, **kwargs):
context = super(JSONLayerImport, self).get_context_data(**kwargs)
return context
def post(self, request, *args, **kwargs):
form = UploadJSONForm(request.POST, request.FILES)
try:
dataFromFile = load(request.FILES["jsonfile"])
except ValueError as e:
            # This is a bad JSON file. We should never get to this point, but it is the last layer of defense.
return HttpResponseRedirect(reverse('layer-list'))
#Check to make sure that we actually have data
if dataFromFile != None:
layerName = request.POST['title']
if not layerName.strip():
layerName = dataFromFile["name"]
            # The DB field names differ from those produced by the export function in maps/models.py, so this create() call maps them explicitly in one long, not-pretty statement.
layer = Layer.objects.create(id=dataFromFile["id"], name = layerName, image_format=dataFromFile["format"], type=dataFromFile["type"],
url=dataFromFile["url"], additional_domains=dataFromFile["subdomains"], layer=dataFromFile["layer"], transparent=dataFromFile["transparent"],
layer_params=dataFromFile["layerParams"], dynamic_params=dataFromFile["dynamicParams"], refreshrate=dataFromFile["refreshrate"],
token=dataFromFile["token"], attribution=dataFromFile["attribution"], spatial_reference=dataFromFile["spatialReference"],
layer_parsing_function=dataFromFile["layerParsingFunction"], enable_identify=dataFromFile["enableIdentify"],
root_field=dataFromFile["rootField"], info_format=dataFromFile["infoFormat"], fields_to_show=dataFromFile["fieldsToShow"],
description=dataFromFile["description"], downloadableLink=dataFromFile["downloadableLink"], layer_info_link=dataFromFile["layer_info_link"],
styles=dataFromFile["styles"])
return HttpResponseRedirect(reverse('layer-list'))
class JSONLayerExport(ListView):
model = Layer
def get(self, request, *args, **kwargs):
        name = self.kwargs.get('pk').replace("%20", " ")
        layer = Layer.objects.get(name__iexact=name)
        layerJson = json.dumps(layer.layer_json(), indent=2)
return HttpResponse(layerJson, mimetype="application/json", status=200)
|
the-stack_0_27026
|
import argparse
import json
import random
from collections import deque
from operator import itemgetter
import os
import cv2
import mmcv
import numpy as np
import torch
from mmcv import Config, DictAction
from mmcv.parallel import collate, scatter
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose
FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 1
THICKNESS = 1
LINETYPE = 1
EXCLUED_STEPS = [
'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
'PyAVDecode', 'RawFrameDecode', 'FrameSelector'
]
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 predict different labels in a long video demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file/url')
parser.add_argument('video_path', help='video file/url')
parser.add_argument('label', help='label file')
#parser.add_argument('out_file', help='output result file in video/json')
#parser.add_argument('--sample_length', help='sample_length * stride = amount of frames for a prediction')
parser.add_argument(
'--is-folder',
type=bool,
default=False,
help='bool if video is file or folder')
parser.add_argument(
'--input-step',
type=int,
default=1,
help='input step for sampling frames')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--threshold',
type=float,
default=0.01,
help='recognition score threshold')
parser.add_argument(
'--stride',
type=float,
default=1,
help=('the prediction stride equals to stride * sample_length '
'(sample_length indicates the size of temporal window from '
'which you sample frames, which equals to '
'clip_len x frame_interval), if set as 0, the '
'prediction stride is 1'))
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--label-color',
nargs='+',
type=int,
default=(0, 0, 255),
help='font color (B, G, R) of the labels in output video')
parser.add_argument(
'--msg-color',
nargs='+',
type=int,
default=(128, 128, 128),
help='font color (B, G, R) of the messages in output video')
args = parser.parse_args()
print(args)
return args
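# Illustrative invocation sketch (the script name and all paths are
# hypothetical; the positional arguments and flags match parse_args above).
# Note that the checkpoint path needs at least three components because
# show_results() builds the output folder name from checkpoint.split("/"),
# and with --is-folder the video directory is expected to contain a
# survai_output/ subfolder:
#
#   python long_video_demo.py configs/tsn_r50.py work_dirs/tsn_r50/epoch_20.pth \
#       videos/ label_map.txt --is-folder True --threshold 0.2 --stride 1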
def show_results_video(result_queue,
text_info,
thr,
msg,
frame,
video_writer,
ind, fps,
label_color=(0, 0, 255),
msg_color=(128, 128, 128),
):
if len(result_queue) != 0:
text_info = {}
results = result_queue.popleft()
for i, result in enumerate(results):
selected_label, score = result
if selected_label == "striking" and score >= 3.0:
pass
elif selected_label == "throwing" and score <= 5.0:
break
elif selected_label == 'spray' and score <= 6.0:
break
elif selected_label == 'aiming' and score <= 6.0:
break
elif score < thr:
break
location = (0, 40 + i * 20)
score = str(round(score, 2))
text = selected_label + ': ' + score
timestamp = str(round(ind/round(fps), 1))
text_info[location] = text
#write threshold passers to a txt
with open(f"{out_file_path}_detections.txt", 'a') as f:
f.write(text + " " + timestamp + "\n")
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
label_color, THICKNESS, LINETYPE)
elif len(text_info):
for location, text in text_info.items():
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
label_color, THICKNESS, LINETYPE)
else:
cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, msg_color,
THICKNESS, LINETYPE)
video_writer.write(frame)
return text_info
def get_results_json(result_queue, text_info, thr, msg, ind, out_json):
if len(result_queue) != 0:
text_info = {}
results = result_queue.popleft()
for i, result in enumerate(results):
selected_label, score = result
if score < thr:
break
text_info[i + 1] = selected_label + ' ' + str(round(score, 2))
out_json[ind] = text_info
elif len(text_info):
out_json[ind] = text_info
else:
out_json[ind] = msg
return text_info, out_json
def show_results(model, data, label, args):
print(args.sample_length)
print("ARGS: ")
print(args)
    # Build the list of video files to process: a single file is used as-is; a folder is walked through all of its subdirectories.
try:
#if declared folder
if args.is_folder == True:
video_list = []
already_done = os.listdir(f'{args.video_path}/survai_output/')
#go through all sub dirs
for roots,dirs,files in os.walk(args.video_path):
for file_name in files:
print("DIR:", roots)
#skip videos already done
if file_name.split('.')[0] + '_out.mp4' in already_done:
print("skipping: "+ file_name)
continue
else:
full_file_path = os.path.join(roots, file_name)
video_list.append(full_file_path)
#print(video_list)
#video_list = os.listdir(args.video_path)
#print(video_list)
#print(args.video_path)
except NotADirectoryError:
video_list = [args.video_path]
#print(video_list)
for video in video_list:
#if single video
#if "." in args.video_path:
# video = f"{args.video_path}"
#else:
# video = f"{args.video_path}{video}"
print(" ", video, " ")
if "survai_output" in video:
print("skipping: ", video)
continue
frame_queue = deque(maxlen=args.sample_length)
result_queue = deque(maxlen=1)
cap = cv2.VideoCapture(video)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
print("video info: ", num_frames, frame_width, frame_height, fps)
msg = 'Preparing action recognition ...'
text_info = {}
out_json = {}
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
frame_size = (frame_width, frame_height)
ind = 0
#create new folder and automatic names of output files - modelmark + epoch
output_folder_name = args.checkpoint.split("/")[1] + "__" + args.checkpoint.split("/")[2].split(".")[0]
print("OUTPUT FOLDER: ", output_folder_name)
out_name = video.split("/")[-1].split(".")[0]
out_file_name = f"{out_name}_out.mp4"
if not os.path.exists(f'outputs/{output_folder_name}'):
os.makedirs(f'outputs/{output_folder_name}')
global out_file_path
if args.is_folder == True:
out_file_path = f"{args.video_path}/survai_output/{out_file_name}"
else:
out_file_path = f"outputs/{output_folder_name}/{out_file_name}"
print("video: ", video)
print("out file path: ", out_file_path)
video_writer = None if out_file_path.endswith('.json') \
else cv2.VideoWriter(out_file_path, fourcc, fps, frame_size)
prog_bar = mmcv.ProgressBar(num_frames)
backup_frames = []
while ind < num_frames:
ind += 1
prog_bar.update()
ret, frame = cap.read()
            # ensure the capture has had time to load the frame before retrying
if ret:
pass
else:
ind -= 1
cv2.waitKey(100)
continue
if frame is None:
# drop it when encounting None
print("none")
continue
backup_frames.append(np.array(frame)[:, :, ::-1])
if ind == args.sample_length:
# provide a quick show at the beginning
frame_queue.extend(backup_frames)
backup_frames = []
elif ((len(backup_frames) == args.input_step
and ind > args.sample_length) or ind == num_frames):
# pick a frame from the backup
# when the backup is full or reach the last frame
chosen_frame = random.choice(backup_frames)
backup_frames = []
frame_queue.append(chosen_frame)
ret, scores = inference(model, data, args, frame_queue)
if ret:
num_selected_labels = min(len(label), 5)
scores_tuples = tuple(zip(label, scores))
scores_sorted = sorted(
scores_tuples, key=itemgetter(1), reverse=True)
results = scores_sorted[:num_selected_labels]
result_queue.append(results)
if out_file_path.endswith('.json'):
text_info, out_json = get_results_json(result_queue, text_info,
args.threshold, msg, ind,
out_json)
else:
text_info = show_results_video(result_queue, text_info,
args.threshold, msg, frame,
video_writer, ind, fps, args.label_color,
args.msg_color)
cap.release()
cv2.destroyAllWindows()
if out_file_path.endswith('.json'):
with open(out_file_path, 'w') as js:
json.dump(out_json, js)
def inference(model, data, args, frame_queue):
if len(frame_queue) != args.sample_length:
# Do no inference when there is no enough frames
return False, None
cur_windows = list(np.array(frame_queue))
if data['img_shape'] is None:
data['img_shape'] = frame_queue[0].shape[:2]
cur_data = data.copy()
cur_data['imgs'] = cur_windows
cur_data = args.test_pipeline(cur_data)
cur_data = collate([cur_data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
cur_data = scatter(cur_data, [args.device])[0]
with torch.no_grad():
scores = model(return_loss=False, **cur_data)[0]
if args.stride > 0:
pred_stride = int(args.sample_length * args.stride)
for _ in range(pred_stride):
frame_queue.popleft()
# for case ``args.stride=0``
# deque will automatically popleft one element
return True, scores
def main():
args = parse_args()
args.device = torch.device(args.device)
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
model = init_recognizer(cfg, args.checkpoint, device=args.device)
data = dict(img_shape=None, modality='RGB', label=-1)
with open(args.label, 'r') as f:
label = [line.strip() for line in f]
# prepare test pipeline from non-camera pipeline
cfg = model.cfg
sample_length = 0
pipeline = cfg.data.test.pipeline
pipeline_ = pipeline.copy()
for step in pipeline:
if 'SampleFrames' in step['type']:
sample_length = step['clip_len'] * step['num_clips']
data['num_clips'] = step['num_clips']
data['clip_len'] = step['clip_len']
pipeline_.remove(step)
if step['type'] in EXCLUED_STEPS:
# remove step to decode frames
pipeline_.remove(step)
test_pipeline = Compose(pipeline_)
assert sample_length > 0
args.sample_length = sample_length
args.test_pipeline = test_pipeline
show_results(model, data, label, args)
if __name__ == '__main__':
main()
|