content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses 1: Python)
---|---|---|---|---|---|---|---|---|
import aiohttp, asyncio
from bs4 import BeautifulSoup
import json
import time
VC_SEARCH = "https://vc.ru/search/v2/content/new"
async def parse_urls(key_word):
async with aiohttp.ClientSession() as session:
async with session.get(VC_SEARCH, params={
"query": key_word,
"target_type": 'posts',
}) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
urls = [x["href"] for x in soup.find_all("a", {"class": "content-feed__link"})]
return urls
async def get_text(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
text = " ".join(map(lambda x: x.text, soup.find("div", {"class": "l-entry__content"}).find_all("p")))
return text
async def get_all_texts(keyword):
urls = await parse_urls(keyword)
all_texts = []
for u in urls[:25]:
text = await get_text(u)
all_texts.append(text)
return all_texts
async def vc_get_data(keyword, result_file_path='result-vc.json'):
texts = await get_all_texts(keyword)
result_dict = {"company": keyword,
"texts": texts}
result_json = json.loads(json.dumps(result_dict))
return result_json
#with open(result_file_path, 'w', encoding='utf-8') as f:
# json.dump(result_json, f, ensure_ascii=False, indent=4)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(vc_get_data("сбер", "other/sber-vc.json"))
| 31.52 | 113 | 0.636421 | [
"MIT"
] | OverFitted/hacksai2021spb | Parsers/vcru.py | 1,580 | Python |
#!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
__copyright__ = (u"Copyright (c), 2016, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.27"
__contributors__ = "Jens Broeder"
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
import sys
import os
from aiida.common.example_helpers import test_and_get_code
from aiida.plugins import DataFactory
from aiida_fleur.workflows.scf import FleurScfWorkChain
# If set to True, will ask AiiDA to run in serial mode (i.e., AiiDA will not
# invoke the mpirun command in the submission script)
run_in_serial_mode = True  # set to False to let AiiDA invoke mpirun
queue = None
################################################################
ParameterData = DataFactory('parameter')
Dict = ParameterData  # ParameterData was later renamed Dict in AiiDA; alias so the wf_para construction below works
FleurinpData = DataFactory('fleur.fleurinp')
try:
dontsend = sys.argv[1]
if dontsend == "--dont-send":
submit_test = True
elif dontsend == "--send":
submit_test = False
else:
raise IndexError
except IndexError:
print(("The first parameter can only be either "
"--send or --dont-send"), file=sys.stderr)
sys.exit(1)
try:
codename = sys.argv[2]
except IndexError:
codename = None
try:
queue = sys.argv[3]
except IndexError:
queue = None
#####
code = test_and_get_code(codename, expected_code_type='fleur.fleur')
# get where tests folder is, then relative path
inpxmlfile = '/usr/users/iff_th1/broeder/aiida/github/aiida-fleur/tests/inp_xml_files/W/inp.xml'
fleurinp = FleurinpData(files = [inpxmlfile])
wf_para = Dict(dict={'fleur_runmax' : 4,
                     'density_criterion' : 0.000001,
'queue_name' : 'th123_node',
'resources' : {"num_machines": 1, "num_mpiprocs_per_machine" : 12},
'walltime_sec': 10*60, 'serial' : run_in_serial_mode})
if submit_test:
    print('workchains do not have a submit_test function so far')
else:
print("Running fleur_scf_wc")
res = FleurScfWorkChain.run(wf_parameters=wf_para, fleurinp=fleurinp, fleur=code)
#remote_data= remote, fleur=code)
| 30.25974 | 97 | 0.650644 | [
"MIT"
] | anoopkcn/aiida-fleur | examples/old_workflowtests/test_run_scf2.py | 2,331 | Python |
"""
Copyright (c) 2008-2020, Jesus Cea Avion <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import unittest
import os, glob
from .test_all import db, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
class DB(unittest.TestCase):
def setUp(self):
self.path = get_new_database_path()
self.db = db.DB()
def tearDown(self):
self.db.close()
del self.db
test_support.unlink(self.path)
class DB_general(DB) :
def test_get_open_flags(self) :
self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
self.assertEqual(db.DB_CREATE, self.db.get_open_flags())
def test_get_open_flags2(self) :
self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE |
db.DB_THREAD)
self.assertEqual(db.DB_CREATE | db.DB_THREAD, self.db.get_open_flags())
def test_get_dbname_filename(self) :
self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
self.assertEqual((self.path, None), self.db.get_dbname())
def test_get_dbname_filename_database(self) :
name = "jcea-random-name"
self.db.open(self.path, dbname=name, dbtype=db.DB_HASH,
flags = db.DB_CREATE)
self.assertEqual((self.path, name), self.db.get_dbname())
def test_bt_minkey(self) :
for i in [17, 108, 1030] :
self.db.set_bt_minkey(i)
self.assertEqual(i, self.db.get_bt_minkey())
def test_lorder(self) :
self.db.set_lorder(1234)
self.assertEqual(1234, self.db.get_lorder())
self.db.set_lorder(4321)
self.assertEqual(4321, self.db.get_lorder())
self.assertRaises(db.DBInvalidArgError, self.db.set_lorder, 9182)
def test_priority(self) :
flags = [db.DB_PRIORITY_VERY_LOW, db.DB_PRIORITY_LOW,
db.DB_PRIORITY_DEFAULT, db.DB_PRIORITY_HIGH,
db.DB_PRIORITY_VERY_HIGH]
for flag in flags :
self.db.set_priority(flag)
self.assertEqual(flag, self.db.get_priority())
def test_get_transactional(self) :
self.assertFalse(self.db.get_transactional())
self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
self.assertFalse(self.db.get_transactional())
class DB_hash(DB) :
def test_h_ffactor(self) :
for ffactor in [4, 16, 256] :
self.db.set_h_ffactor(ffactor)
self.assertEqual(ffactor, self.db.get_h_ffactor())
def test_h_nelem(self) :
for nelem in [1, 2, 4] :
nelem = nelem*1024*1024 # Millions
self.db.set_h_nelem(nelem)
self.assertEqual(nelem, self.db.get_h_nelem())
def test_pagesize(self) :
for i in range(9, 17) : # From 512 to 65536
i = 1<<i
self.db.set_pagesize(i)
self.assertEqual(i, self.db.get_pagesize())
# The valid values goes from 512 to 65536
# Test 131072 bytes...
self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<17)
# Test 256 bytes...
self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<8)
class DB_txn(DB) :
def setUp(self) :
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_INIT_TXN)
self.db = db.DB(self.env)
def tearDown(self) :
self.db.close()
del self.db
self.env.close()
del self.env
test_support.rmtree(self.homeDir)
def test_flags(self) :
self.db.set_flags(db.DB_CHKSUM)
self.assertEqual(db.DB_CHKSUM, self.db.get_flags())
self.db.set_flags(db.DB_TXN_NOT_DURABLE)
self.assertEqual(db.DB_TXN_NOT_DURABLE | db.DB_CHKSUM,
self.db.get_flags())
def test_get_transactional(self) :
self.assertFalse(self.db.get_transactional())
# DB_AUTO_COMMIT = Implicit transaction
self.db.open("XXX", dbtype=db.DB_HASH,
flags = db.DB_CREATE | db.DB_AUTO_COMMIT)
self.assertTrue(self.db.get_transactional())
class DB_recno(DB) :
def test_re_pad(self) :
for i in [' ', '*'] : # Check chars
self.db.set_re_pad(i)
self.assertEqual(ord(i), self.db.get_re_pad())
for i in [97, 65] : # Check integers
self.db.set_re_pad(i)
self.assertEqual(i, self.db.get_re_pad())
def test_re_delim(self) :
for i in [' ', '*'] : # Check chars
self.db.set_re_delim(i)
self.assertEqual(ord(i), self.db.get_re_delim())
for i in [97, 65] : # Check integers
self.db.set_re_delim(i)
self.assertEqual(i, self.db.get_re_delim())
def test_re_source(self) :
for i in ["test", "test2", "test3"] :
self.db.set_re_source(i)
self.assertEqual(i, self.db.get_re_source())
class DB_queue(DB) :
def test_re_len(self) :
for i in [33, 65, 300, 2000] :
self.db.set_re_len(i)
self.assertEqual(i, self.db.get_re_len())
def test_q_extentsize(self) :
for i in [1, 60, 100] :
self.db.set_q_extentsize(i)
self.assertEqual(i, self.db.get_q_extentsize())
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DB_general))
suite.addTest(unittest.makeSuite(DB_txn))
suite.addTest(unittest.makeSuite(DB_hash))
suite.addTest(unittest.makeSuite(DB_recno))
suite.addTest(unittest.makeSuite(DB_queue))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 36.979798 | 79 | 0.642721 | [
"BSD-3-Clause"
] | OnApp/cdn-bsddb3-python | Lib3/bsddb/test/test_db.py | 7,322 | Python |
from dataclasses import dataclass, field
from datetime import datetime # for typehinting
from typing import TYPE_CHECKING, AsyncGenerator, List, Literal, Optional
import aiohttp
import dateparser
from .exceptions import UnsupportedRegionError
from .pricing import PriceQuery, query_price
COUNT = 30 # Items per page of paginated response
if TYPE_CHECKING:
from .regions import Region # pragma: no cover
@dataclass
class RatingContent:
id: int = None
name: str = None
type: Literal["descriptor", "interactive"] = None
image_url: Optional[str] = None # JP Field
svg_image_url: Optional[str] = None # JP Field
def __init__(self, data) -> None:
self.id = data['id']
self.name = data['name']
self.type = data['type']
if data.get('image_url'):
self.image_url = data['image_url']
if data.get('svg_image_url'):
self.svg_image_url = data['svg_image_url']
@dataclass
class Rating:
age: int = None
id: int = None
image_url: Optional[str] = None
name: str = None
provisional: bool = None
svg_image_url: str = None
def __init__(self, data) -> None:
if (data['id']) == 0:
return
self.age = data['age']
self.id = data['id']
if data.get('image_url'):
self.image_url = data['image_url']
self.provisional = data['provisional']
self.svg_image_url = data['svg_image_url']
@dataclass
class RatingSystem:
id: int = None
name: str = None
def __init__(self, data) -> None:
self.id = data['id']
self.name = data['name']
@dataclass
class Game:
region: "Region" = None
content_type: str = None # Literal["game", "bundle"] ??? expand and replace hint
dominant_colors: List[str] = None
formal_name: str = None
hero_banner_url: str = None
id: int = None
is_new: bool = None
membership_required: bool = None
public_status: Literal["public"] = None
rating_content: List[RatingContent] = field(default_factory=list)
rating: Rating = None
rating_system: RatingSystem = None
release_date_on_eshop: datetime = None
screenshots: List[str] = field(default_factory=list)
strong_disclaimer: str = None
tags: List = field(default_factory=list)
target_titles: List = field(default_factory=list)
def __init__(self, data, region) -> None:
self.region = region
self.content_type = data['content_type']
self.dominant_colors = data['dominant_colors']
self.formal_name = data['formal_name']
self.hero_banner_url = data['hero_banner_url']
self.id = data['id']
self.is_new = data['is_new']
self.membership_required = data['membership_required']
self.public_status = data['public_status']
self.rating_content = [RatingContent(c) for c in data['rating_info']['content_descriptors']]
self.rating = Rating(data['rating_info']['rating'])
self.rating_system = RatingSystem(data['rating_info']['rating_system'])
# TODO: is this dateparser correct?
self.release_date_on_eshop = dateparser.parse(data['release_date_on_eshop'], settings={'TIMEZONE': "UTC"})
self.screenshots = [s['images'][0]['url'] for s in data['screenshots']]
self.strong_disclaimer = data.get('strong_disclaimer', None)
self.tags = data['tags']
self.target_titles = data['target_titles']
async def query_price(self) -> PriceQuery:
return await query_price(self.region, self)
async def query_listing(region: "Region", type: Literal["sales", "new", "ranking"]) -> AsyncGenerator[Game, None]:
if not region.supports_listing:
raise UnsupportedRegionError("Region does not support listings")
if type not in ["sales", "new", "ranking"]:
raise ValueError("Invalid type: " + type)
lang, reg = region.culture_code.split('_')
offset = 0
async with aiohttp.ClientSession() as session:
while True:
url = f'https://ec.nintendo.com/api/{reg}/{lang}/search/{type}?offset={offset}&count={COUNT}'
async with session.get(url) as request:
request.raise_for_status()
data = await request.json()
for game in data['contents']:
yield Game(game, region)
if (offset + COUNT) >= data['total']:
break
offset += COUNT
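# Illustrative usage sketch (not part of the library API): shows how the async
# generator above is typically consumed. Obtaining a Region instance is
# library-specific and assumed to happen elsewhere.
async def _example_print_sales(region: "Region") -> None:
    """Print the name and eShop release date of every listed sale item."""
    async for game in query_listing(region, "sales"):
        print(game.formal_name, game.release_date_on_eshop)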
| 32.816176 | 115 | 0.636791 | [
"MIT"
] | MattBSG/Switch-REST | nsecpy/listing.py | 4,463 | Python |
from collections import defaultdict
from typing import Dict, Tuple, Iterator, Callable, Any, Optional
from dataclasses import dataclass
"""
Provides the `TaggedProfiler` class related to record profiling.
TODO: Better description needed.
"""
@dataclass
class TaggedProfilerRecordStatus:
offset: int
tag: str
key: str
val: Any
r: Optional[dict]
@dataclass
class TaggedProfilerSummary:
total: int
histo: dict
index: Optional[dict]
cache: Optional[dict]
def describe(self) -> Iterator[str]:
yield f"histo = {self.histo}"
if self.index is None:
yield f"index = {self.index}"
else:
yield f"index with {len(self.index)} items:"
for label, nums in self.index.items():
yield f"label = '{label}, size = {len(nums)}:"
if self.cache is not None:
for n in nums:
yield f"cache[{n}] = {self.cache[n]}"
class TaggedProfiler:
"""A useful tag-based profiler class which we'll describe when we have more time."""
def __init__(self, tagmap: Dict[str,Callable]):
self.tagmap = tagmap
def eval_dict(self, r: dict) -> Iterator[Tuple[str,str,str]]:
for (tag, f) in self.tagmap.items():
for (k, v) in r.items():
if f(v):
yield (tag, k, v)
def evaluate(self, recs: Iterator[dict], deep: bool = False) -> Iterator[TaggedProfilerRecordStatus]:
for (i, r) in enumerate(recs):
for (tag, k, v) in self.eval_dict(r):
yield TaggedProfilerRecordStatus(i, tag, k, v, r if deep else None)
def profile(self, recs: Iterator[dict], index: bool = False, deep: bool = False) -> TaggedProfilerSummary:
"""Provides the most useful summary counts you'll likely want from the incoming record sequence.
        Optional :index and :deep flags allow us to return special indexing and caching structs which we'll describe later."""
# We use underscores for all "recording" structures.
        # Non-underscore names for input variables and flags.
labels = list(self.tagmap.keys())
temp_cache: Dict[int,Any] = {}
temp_index: Dict[str,Any] = {k:defaultdict(int) for k in labels}
for status in self.evaluate(recs, deep):
temp_cache[status.offset] = status.r if deep else 1
temp_index[status.tag][status.offset] += 1
_total = len(temp_cache)
_histo: Dict[str,int] = {k:len(v) for (k,v) in temp_index.items()}
_index: Optional[Dict[str,list]] = None
_cache: Optional[Dict[int,Any]] = None
if temp_index:
_index = {k:list(v.keys()) for k,v in temp_index.items()}
if deep:
_cache = temp_cache
return TaggedProfilerSummary(_total, _histo, _index, _cache)
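# Illustrative usage sketch (not part of the module): the tag names and
# predicate functions below are made up purely for demonstration.
def _example_profile() -> TaggedProfilerSummary:
    profiler = TaggedProfiler({
        "missing": lambda v: v is None,
        "negative": lambda v: isinstance(v, (int, float)) and v < 0,
    })
    records = [{"a": 1, "b": None}, {"a": -3, "b": 2}, {"a": 4, "b": 5}]
    return profiler.profile(records, index=True, deep=True)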
| 37.697368 | 126 | 0.614311 | [
"Apache-2.0"
] | wstlabs/caixa | caixa/profile/tagged.py | 2,865 | Python |
import torch;
import numpy as np;
from torch import nn;
from torch import optim;
import torch.functional as func;
from torchvision import datasets, transforms, models;
import time;
from os import path;
import argparse;
import utils
import json;
def main(test_image_path, checkpoint_path, top_k, category_names, gpu):
print("test_image_path: " , test_image_path);
print("checkpoint_path: " , checkpoint_path);
print("top_k: " , top_k);
print("category_names: " , category_names);
print("Use_GPU" , gpu);
if gpu == True:
device='cuda';
else:
device='cpu';
if category_names is not None:
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
else:
cat_to_name = None;
model, class_to_idx = utils.load_model_checkpoint(checkpoint_path);
idx_to_class = {class_to_idx[k] : k for k in class_to_idx};
probs, outputs, names = utils.predict(test_image_path, model, idx_to_class, cat_to_name, top_k, device);
print(f'The Top {top_k} predictions are:');
for i in range(top_k):
print(f'\tPrediction #{i} : {names[i]} with Probability : {probs[i]:.3f}');
if __name__ == '__main__':
print('qaz');
parser = argparse.ArgumentParser(description='Image Classification Project')
parser.add_argument('test_image_path', action="store", help="Location of Test file for predicting classes of");
parser.add_argument('checkpoint_path', action="store", help="Location of Model Checkpoint file (must have file format .pth)");
parser.add_argument('--top_k', action="store", dest="top_k", help="Number of Top Likely classes predicted.", default=3, type=int)
parser.add_argument('--category_names', action="store", dest="category_names", help="path to a file with class categories to real names", default="cat_to_name.json");
parser.add_argument('--gpu', action="store_true", dest="gpu", default=False, help="is provided CUDA gpu will be used, else CPU")
parsed_args = parser.parse_args();
main(parsed_args.test_image_path, parsed_args.checkpoint_path, parsed_args.top_k, parsed_args.category_names, parsed_args.gpu);
| 41.188679 | 170 | 0.696748 | [
"MIT"
] | ravishchawla/Udacity-Data-Scientist-nd | image_classifier/predict.py | 2,183 | Python |
try:
    # collections.abc is required on Python 3.3+; the aliases in the
    # top-level collections module were removed in Python 3.10.
    from collections.abc import Mapping, MutableMapping
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
        if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
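# Illustrative usage (not part of the module's public API): demonstrates the
# case-insensitive lookups, lower_items() and copy() described in the class
# docstring above.
def _example_usage():
    headers = CaseInsensitiveDict({"Content-Type": "application/json"})
    assert headers["content-type"] == "application/json"
    assert dict(headers.lower_items()) == {"content-type": "application/json"}
    clone = headers.copy()
    assert clone == headers and list(clone) == ["Content-Type"]
    return clone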
| 31.155844 | 75 | 0.620675 | [
"BSD-2-Clause"
] | jespino/anillo | anillo/utils/structures.py | 2,399 | Python |
# -*- coding: utf-8 -*-
# This is a simple mailbox polling script for the Sahana Messaging Module
# If you need to collect from non-compliant mail servers, consider using the robust Fetchmail to collect & store in a more compliant mailer!
# This script doesn't handle MIME attachments
import sys, socket, email, uuid
# Read-in configuration from Database
settings = db(db.msg_email_settings.id == 1).select(limitby=(0, 1)).first()
host = settings.inbound_mail_server
server_type = settings.inbound_mail_type
ssl = settings.inbound_mail_ssl
port = settings.inbound_mail_port
username = settings.inbound_mail_username
password = settings.inbound_mail_password
delete = settings.inbound_mail_delete
if server_type == "pop3":
import poplib
# http://docs.python.org/library/poplib.html
try:
if ssl:
p = poplib.POP3_SSL(host, port)
else:
p = poplib.POP3(host, port)
except socket.error, e:
error = "Cannot connect: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
try:
# Attempting APOP authentication...
p.apop(username, password)
except poplib.error_proto:
# Attempting standard authentication...
try:
p.user(username)
p.pass_(password)
except poplib.error_proto, e:
print "Login failed:", e
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status="Login failed: %s" % e)
except:
db.msg_email_inbound_status.insert(status="Login failed: %s" % e)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
dellist = []
mblist = p.list()[1]
for item in mblist:
number, octets = item.split(" ")
# Retrieve the message (storing it in a list of lines)
lines = p.retr(number)[1]
# Create an e-mail object representing the message
msg = email.message_from_string("\n".join(lines))
# Parse out the 'From' Header
sender = msg["from"]
# Parse out the 'Subject' Header
if "subject" in msg:
subject = msg["subject"]
else:
subject = ""
# Parse out the 'Body'
textParts = msg.get_payload()
body = textParts[0].get_payload()
# Store in DB
uuidstamp = uuid.uuid4()
db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body)
if delete:
# Add it to the list of messages to delete later
dellist.append(number)
# Explicitly commit DB operations when running from Cron
db.commit()
# Iterate over the list of messages to delete
for number in dellist:
p.dele(number)
p.quit()
elif server_type == "imap":
import imaplib
# http://docs.python.org/library/imaplib.html
try:
if ssl:
M = imaplib.IMAP4_SSL(host, port)
else:
M = imaplib.IMAP4(host, port)
except socket.error, e:
error = "Cannot connect: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
try:
M.login(username, password)
except M.error, e:
error = "Login failed: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
dellist = []
# Select inbox
M.select()
# Search for Messages to Download
typ, data = M.search(None, "ALL")
for num in data[0].split():
typ, msg_data = M.fetch(num, "(RFC822)")
for response_part in msg_data:
if isinstance(response_part, tuple):
msg = email.message_from_string(response_part[1])
# Parse out the 'From' Header
sender = msg["from"]
# Parse out the 'Subject' Header
if "subject" in msg:
subject = msg["subject"]
else:
subject = ""
# Parse out the 'Body'
textParts = msg.get_payload()
body = textParts[0].get_payload()
# Store in DB
uuidstamp = uuid.uuid4()
db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body)
if delete:
# Add it to the list of messages to delete later
dellist.append(num)
# Explicitly commit DB operations when running from Cron
db.commit()
# Iterate over the list of messages to delete
for number in dellist:
typ, response = M.store(number, "+FLAGS", r"(\Deleted)")
M.close()
M.logout()
| 37.333333 | 145 | 0.589973 | [
"MIT"
] | dotskapes/dotSkapes | cron/email_receive.py | 5,826 | Python |
"""
Module to wrap an integer in bitwise flag/field accessors.
"""
from collections import OrderedDict
from pcapng.ngsix import namedtuple, Iterable
class FlagBase(object):
"""\
Base class for flag types to be used in a Flags object.
Handles the bitwise math so subclasses don't have to worry about it.
"""
__slots__ = [
'owner',
'offset',
'size',
'extra',
'mask',
]
def __init__(self, owner, offset, size, extra=None):
if size < 1:
raise TypeError('Flag must be at least 1 bit wide')
if size > owner._nbits:
raise TypeError('Flag must fit into owner size')
self.owner = owner
self.offset = offset
self.size = size
self.extra = extra
self.mask = ((1 << self.size)-1) << self.offset
def get_bits(self):
return (self.owner._value & self.mask) >> self.offset
def set_bits(self, val):
val &= (1 << self.size) - 1
self.owner._value &= ~self.mask
self.owner._value |= (val << self.offset)
class FlagBool(FlagBase):
"""Object representing a single boolean flag"""
def __init__(self, owner, offset, size, extra=None):
if size != 1:
raise TypeError('{cls} can only be 1 bit in size'.format(cls=self.__class__.__name__))
super(FlagBool, self).__init__(owner, offset, size)
def get(self):
return bool(self.get_bits())
def set(self, val):
self.set_bits(int(bool(val)))
class FlagUInt(FlagBase):
"""\
Object representing an unsigned integer of the given size stored in
a larger bitfield
"""
def get(self):
return self.get_bits()
def set(self, val):
self.set_bits(val)
class FlagEnum(FlagBase):
"""\
Object representing a range of values stored in part of a larger
bitfield
"""
def __init__(self, owner, offset, size, extra=None):
if not isinstance(extra, Iterable):
raise TypeError('{cls} needs an iterable of values'.format(cls=self.__class__.__name__))
extra = list(extra)
if len(extra) > 2**size:
raise TypeError('{cls} iterable has too many values (got {got}, {size} bits only address {max})'.format(cls=self.__class__.__name__, got=len(extra), size=size, max=2**size))
super(FlagEnum, self).__init__(owner, offset, size, extra)
def get(self):
val = self.get_bits()
try:
return self.extra[val]
except IndexError:
return '[invalid value]'
def set(self, val):
if val in self.extra:
self.set_bits(self.extra.index(val))
elif isinstance(val, int):
self.set_bits(val)
else:
raise TypeError('Invalid value {val} for {cls}'.format(val=val, cls=self.__class__.__name__))
# Class representing a single flag schema for FlagWord.
# 'nbits' defaults to 1, and 'extra' defaults to None.
FlagField = namedtuple('FlagField', ('name', 'ftype', 'nbits', 'extra'),
defaults=(1, None))
class FlagWord(object):
"""\
Class to wrap an integer in bitwise flag/field accessors.
"""
__slots__ = [
'_nbits',
'_value',
'_schema',
]
def __init__(self, schema, nbits=32, initial=0):
"""
:param schema:
A list of FlagField objects representing the values to be packed
into this object, in order from LSB to MSB of the underlying int
:param nbits:
An integer representing the total number of bits used for flags
:param initial:
The initial integer value of the flags field
"""
self._nbits = nbits
self._value = initial
self._schema = OrderedDict()
tot_bits = sum([item.nbits for item in schema])
if tot_bits > nbits:
raise TypeError("Too many fields for {nbits}-bit field (schema defines {tot} bits)".format(nbits=nbits, tot=tot_bits))
bitn = 0
for item in schema:
if not isinstance(item, FlagField):
raise TypeError('Schema must be composed of FlagField objects')
if not issubclass(item.ftype, FlagBase):
raise TypeError('Expected FlagBase, got {}'.format(item.ftype))
self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)
bitn += item.nbits
def __int__(self):
return self._value
def __repr__(self):
rv = '<{0} (value={1})'.format(self.__class__.__name__, self._value)
for k, v in self._schema.items():
rv += ' {0}={1}'.format(k, v.get())
return rv+'>'
def __getattr__(self, name):
try:
v = self._schema[name]
except KeyError:
raise AttributeError(name)
return v.get()
def __setattr__(self, name, val):
try:
return object.__setattr__(self, name, val)
except AttributeError:
pass
try:
v = self._schema[name]
except KeyError:
raise AttributeError(name)
return v.set(val)
if __name__ == '__main__':
f = FlagWord([
FlagField('inout', FlagEnum, 2, ('NA', 'inbound', 'outbound')),
FlagField('casttype', FlagEnum, 3, ('NA', 'unicast', 'multicast', 'broadcast', 'promiscuous')),
FlagField('fcslen', FlagUInt, 4),
FlagField('reserved', FlagUInt, 7),
FlagField('err_16', FlagBool),
FlagField('err_17', FlagBool),
FlagField('err_18', FlagBool),
FlagField('err_19', FlagBool),
FlagField('err_20', FlagBool),
FlagField('err_21', FlagBool),
FlagField('err_22', FlagBool),
FlagField('err_23', FlagBool),
FlagField('err_crc', FlagBool),
FlagField('err_long', FlagBool),
FlagField('err_short', FlagBool),
FlagField('err_frame_gap', FlagBool),
FlagField('err_frame_align', FlagBool),
FlagField('err_frame_delim', FlagBool),
FlagField('err_preamble', FlagBool),
FlagField('err_symbol', FlagBool),
])
f.fcslen = 12
print(f)
print(int(f))
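    # Extra illustrative reads/writes (demo only): attribute access is routed
    # through the schema defined above.
    f.inout = 'inbound'   # FlagEnum accepts any value from its iterable
    f.err_crc = True      # FlagBool stores a single bit
    print(f.inout, f.casttype, f.fcslen, f.err_crc)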
| 30.746341 | 185 | 0.5802 | [
"Apache-2.0"
] | Boolean263/python-pcapng | pcapng/flags.py | 6,303 | Python |
import os
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
PARENT_DIR = os.path.abspath(os.path.join(CUR_DIR, os.pardir))
DATA_DIR = os.path.join(PARENT_DIR, "input")
IMAGE_DIR = os.path.join(DATA_DIR, "image")
def predict_label(json_data, filename):
print(json_data)
drivename, fname = filename.split("/")
fname = fname.split(".")[0]
bounding_box_path = os.path.join("classify/bounding_boxes", fname+'.json')
bounding_box_filename = os.path.join(CUR_DIR, bounding_box_path)
output_path = os.path.join(CUR_DIR, "classify/write_data.txt")
image_filename = os.path.join(IMAGE_DIR, fname+'.png')
try:
open(bounding_box_filename, 'w').close()
except Exception as e:
pass
with open(bounding_box_filename,'a') as f:
f.write(json_data)
os.system("python3 {} --image_file={}".format(os.path.join(CUR_DIR, "classify/classifier.py"), image_filename))
data = os.popen("cat {}".format(output_path)).read()
os.system("rm classify/bounding_boxes/*.json")
return get_keyword(data)
def get_keyword(data):
pedestrian_keywords = {'person', 'man', 'woman', 'walker', 'pedestrian'}
car_keywords = {'car'}
van_keywords = {'van', 'minivan', 'bus', 'minibus'}
truck_keywords = {'truck'}
cyclist_keywords = {'cyclist', 'motorcyclist', 'unicyclist', 'bicycle', 'motocycle',
'bike', 'motorbike', 'unicycle', 'monocycle', 'rickshaw'}
words = []
for w in data.split(','):
words.extend(w.split(' '))
words = set(words)
if words.intersection(car_keywords):
return 0
if words.intersection(van_keywords):
return 1
if words.intersection(truck_keywords):
return 2
if words.intersection(pedestrian_keywords):
return 3
if words.intersection(cyclist_keywords):
return 4
return -1
| 32.826923 | 112 | 0.718219 | [
"Apache-2.0"
] | hasanari/sane | app/predict_label.py | 1,707 | Python |
from ows_refactored.common.ows_reslim_cfg import reslim_landsat
bands_ls5_st = {
"ST_B6": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
bands_ls7_st = {
"ST_B6": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
bands_ls8_st = {
"ST_B10": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
style_lsc2_st = {
"name": "surface_temperature",
"title": "Surface temperature - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
style_lsc2_st_masked = {
"name": "surface_temperature_masked",
"title": "Surface temperature (cloud masked) - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"pq_masks": [
{
"band": "QA_PIXEL",
"flags": {
"clear": True
},
},
],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
style_lsc2_st_masked_ls8 = {
"name": "surface_temperature_masked",
"title": "Surface temperature (cloud masked) - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"pq_masks": [
{
"band": "QA_PIXEL",
"flags": {
"clear": True,
"cirrus": "not_high_confidence"
},
},
],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
style_lsc2_st_qa = {
"name": "surface_temperature_uncertainty",
"title": "Surface temperature uncertainty - Celsius",
"abstract": "Surface temperature uncertainty in degrees Celsius",
"index_expression": "(0.01*st_qa)",
"mpl_ramp": "viridis",
"range": [0.0, 6.0],
"legend": {
"begin": "0.0",
"end": "6.0",
"decimal_places": 1,
"ticks": ["0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0"],
"tick_labels": {
"0.0": {"label": "0.0"},
"1.0": {"label": "1.0"},
"2.0": {"label": "2.0"},
"3.0": {"label": "3.0"},
"4.0": {"label": "4.0"},
"5.0": {"label": "5.0"},
"6.0": {"prefix": ">"},
},
},
}
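# Note on the "(0.00341802*st - 124.15)" index_expression used in the styles
# above (explanatory only; the scale/offset follow the USGS Landsat
# Collection 2 Level-2 ST scaling and are not defined elsewhere in this file):
#     kelvin  = DN * 0.00341802 + 149.0
#     celsius = kelvin - 273.15 = DN * 0.00341802 - 124.15
def _st_dn_to_celsius(dn):
    """Illustrative helper mirroring the surface temperature index_expression."""
    return 0.00341802 * dn - 124.15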
layer_ls8 = {
"title": "Surface temperature (Landsat 8)",
"name": "ls8_st",
"abstract": """
Surface temperature measures the Earth’s surface temperature and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, and extreme heat events such as natural disasters (e.g., volcanic eruptions, wildfires), and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
Landsat 8 product has a spatial resolution of 30 m and a temporal coverage of 2013 to present.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls8_st",
"bands": bands_ls8_st,
"dynamic": True,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False, # True
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls8_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked_ls8,
],
},
}
layer_ls7 = {
"title": "Surface temperature (Landsat 7)",
"name": "ls7_st",
"abstract": """
Surface temperature measures the Earth’s surface temperature and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, and extreme heat events such as natural disasters (e.g., volcanic eruptions, wildfires), and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
Landsat 7 product has a spatial resolution of 30 m and a temporal coverage of 1999 to present.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls7_st",
"bands": bands_ls7_st,
"dynamic": True,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False, # True
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls7_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked,
],
},
}
layer_ls5 = {
"title": "Surface temperature (Landsat 5)",
"name": "ls5_st",
"abstract": """
Surface temperature measures the Earth’s surface temperature and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, and extreme heat events such as natural disasters (e.g., volcanic eruptions, wildfires), and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
Landsat 5 product has a spatial resolution of 30 m and a temporal coverage of 1984 to 2012.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls5_st",
"bands": bands_ls5_st,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False, # True
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls5_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked,
],
},
}
| 40.075188 | 390 | 0.621764 | [
"Apache-2.0"
] | FlexiGroBots-H2020/config | services/ows_refactored/surface_temperature/ows_lsc2_st_cfg.py | 10,666 | Python |
"""
Unit tests for nltk.tokenize.
See also nltk/test/tokenize.doctest
"""
import pytest
from nltk.tokenize import (
punkt,
word_tokenize,
TweetTokenizer,
StanfordSegmenter,
TreebankWordTokenizer,
SyllableTokenizer,
LegalitySyllableTokenizer,
)
def setup_module(module):
import pytest
try:
seg = StanfordSegmenter()
seg.default_config("ar")
seg.default_config("zh")
except LookupError as e:
pytest.skip(
"Tests for nltk.tokenize.stanford_segmenter skipped: %s" % str(e)
)
    try:
        # StanfordTokenizer is not re-exported from nltk.tokenize, so import
        # it from its own module here.
        from nltk.tokenize.stanford import StanfordTokenizer

        StanfordTokenizer()
    except (ImportError, LookupError):
        pytest.skip(
            "Tests for nltk.tokenize.stanford are skipped because the stanford postagger jar doesn't exist"
        )
class TestTokenize:
def test_tweet_tokenizer(self):
"""
Test TweetTokenizer using words with special and accented characters.
"""
tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
s9 = "@myke: Let's test these words: resumé España München français"
tokens = tokenizer.tokenize(s9)
expected = [
':',
"Let's",
'test',
'these',
'words',
':',
'resumé',
'España',
'München',
'français',
]
assert tokens == expected
def test_sonority_sequencing_syllable_tokenizer(self):
"""
Test SyllableTokenizer tokenizer.
"""
tokenizer = SyllableTokenizer()
tokens = tokenizer.tokenize('justification')
assert tokens == ['jus', 'ti', 'fi', 'ca', 'tion']
def test_legality_principle_syllable_tokenizer(self):
"""
Test LegalitySyllableTokenizer tokenizer.
"""
from nltk.corpus import words
test_word = "wonderful"
tokenizer = LegalitySyllableTokenizer(words.words())
tokens = tokenizer.tokenize(test_word)
assert tokens == ['won', 'der', 'ful']
def test_stanford_segmenter_arabic(self):
"""
Test the Stanford Word Segmenter for Arabic (default config)
"""
try:
seg = StanfordSegmenter()
seg.default_config('ar')
sent = u'يبحث علم الحاسوب استخدام الحوسبة بجميع اشكالها لحل المشكلات'
segmented_sent = seg.segment(sent.split())
assert segmented_sent.split() == [
'يبحث',
'علم',
'الحاسوب',
'استخدام',
'الحوسبة',
'ب',
'جميع',
'اشكال',
'ها',
'ل',
'حل',
'المشكلات',
]
except LookupError as e:
pytest.skip(str(e))
def test_stanford_segmenter_chinese(self):
"""
Test the Stanford Word Segmenter for Chinese (default config)
"""
try:
seg = StanfordSegmenter()
seg.default_config('zh')
sent = u"这是斯坦福中文分词器测试"
segmented_sent = seg.segment(sent.split())
assert segmented_sent.split() == ['这', '是', '斯坦福', '中文', '分词器', '测试']
except LookupError as e:
pytest.skip(str(e))
def test_phone_tokenizer(self):
"""
Test a string that resembles a phone number but contains a newline
"""
# Should be recognized as a phone number, albeit one with multiple spaces
tokenizer = TweetTokenizer()
test1 = "(393) 928 -3010"
expected = ['(393) 928 -3010']
result = tokenizer.tokenize(test1)
assert result == expected
# Due to newline, first three elements aren't part of a phone number;
# fourth is
test2 = "(393)\n928 -3010"
expected = ['(', '393', ')', "928 -3010"]
result = tokenizer.tokenize(test2)
assert result == expected
def test_pad_asterisk(self):
"""
Test padding of asterisk for word tokenization.
"""
text = "This is a, *weird sentence with *asterisks in it."
expected = ['This', 'is', 'a', ',', '*', 'weird', 'sentence',
'with', '*', 'asterisks', 'in', 'it', '.']
assert word_tokenize(text) == expected
def test_pad_dotdot(self):
"""
Test padding of dotdot* for word tokenization.
"""
text = "Why did dotdot.. not get tokenized but dotdotdot... did? How about manydots....."
expected = ['Why', 'did', 'dotdot', '..', 'not', 'get',
'tokenized', 'but', 'dotdotdot', '...', 'did', '?',
'How', 'about', 'manydots', '.....']
assert word_tokenize(text) == expected
def test_remove_handle(self):
"""
Test remove_handle() from casual.py with specially crafted edge cases
"""
tokenizer = TweetTokenizer(strip_handles=True)
# Simple example. Handles with just numbers should be allowed
test1 = "@twitter hello @twi_tter_. hi @12345 @123news"
expected = ['hello', '.', 'hi']
result = tokenizer.tokenize(test1)
assert result == expected
# Handles are allowed to follow any of the following characters
test2 = "@n`@n~@n(@n)@n-@n=@n+@n\\@n|@n[@n]@n{@n}@n;@n:@n'@n\"@n/@n?@n.@n,@n<@n>@n @n\n@n ñ@n.ü@n.ç@n."
expected = [
'`',
'~',
'(',
')',
'-',
'=',
'+',
'\\',
'|',
'[',
']',
'{',
'}',
';',
':',
"'",
'"',
'/',
'?',
'.',
',',
'<',
'>',
'ñ',
'.',
'ü',
'.',
'ç',
'.',
]
result = tokenizer.tokenize(test2)
assert result == expected
# Handles are NOT allowed to follow any of the following characters
test3 = "a@n j@n z@n A@n L@n Z@n 1@n 4@n 7@n 9@n 0@n _@n !@n @@n #@n $@n %@n &@n *@n"
expected = [
'a',
'@n',
'j',
'@n',
'z',
'@n',
'A',
'@n',
'L',
'@n',
'Z',
'@n',
'1',
'@n',
'4',
'@n',
'7',
'@n',
'9',
'@n',
'0',
'@n',
'_',
'@n',
'!',
'@n',
'@',
'@n',
'#',
'@n',
'$',
'@n',
'%',
'@n',
'&',
'@n',
'*',
'@n',
]
result = tokenizer.tokenize(test3)
assert result == expected
# Handles are allowed to precede the following characters
test4 = "@n!a @n#a @n$a @n%a @n&a @n*a"
expected = ['!', 'a', '#', 'a', '$', 'a', '%', 'a', '&', 'a', '*', 'a']
result = tokenizer.tokenize(test4)
assert result == expected
# Tests interactions with special symbols and multiple @
test5 = "@n!@n @n#@n @n$@n @n%@n @n&@n @n*@n @n@n @@n @n@@n @n_@n @n7@n @nj@n"
expected = [
'!',
'@n',
'#',
'@n',
'$',
'@n',
'%',
'@n',
'&',
'@n',
'*',
'@n',
'@n',
'@n',
'@',
'@n',
'@n',
'@',
'@n',
'@n_',
'@n',
'@n7',
'@n',
'@nj',
'@n',
]
result = tokenizer.tokenize(test5)
assert result == expected
# Tests that handles can have a max length of 20
test6 = "@abcdefghijklmnopqrstuvwxyz @abcdefghijklmnopqrst1234 @abcdefghijklmnopqrst_ @abcdefghijklmnopqrstendofhandle"
expected = ['uvwxyz', '1234', '_', 'endofhandle']
result = tokenizer.tokenize(test6)
assert result == expected
# Edge case where an @ comes directly after a long handle
test7 = "@abcdefghijklmnopqrstu@abcde @abcdefghijklmnopqrst@abcde @abcdefghijklmnopqrst_@abcde @abcdefghijklmnopqrst5@abcde"
expected = [
'u',
'@abcde',
'@abcdefghijklmnopqrst',
'@abcde',
'_',
'@abcde',
'5',
'@abcde',
]
result = tokenizer.tokenize(test7)
assert result == expected
def test_treebank_span_tokenizer(self):
"""
Test TreebankWordTokenizer.span_tokenize function
"""
tokenizer = TreebankWordTokenizer()
# Test case in the docstring
test1 = "Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks)."
expected = [
(0, 4),
(5, 12),
(13, 17),
(18, 19),
(19, 23),
(24, 26),
(27, 30),
(31, 32),
(32, 36),
(36, 37),
(37, 38),
(40, 46),
(47, 48),
(48, 51),
(51, 52),
(53, 55),
(56, 59),
(60, 62),
(63, 68),
(69, 70),
(70, 76),
(76, 77),
(77, 78),
]
result = list(tokenizer.span_tokenize(test1))
assert result == expected
# Test case with double quotation
test2 = "The DUP is similar to the \"religious right\" in the United States and takes a hardline stance on social issues"
expected = [
(0, 3),
(4, 7),
(8, 10),
(11, 18),
(19, 21),
(22, 25),
(26, 27),
(27, 36),
(37, 42),
(42, 43),
(44, 46),
(47, 50),
(51, 57),
(58, 64),
(65, 68),
(69, 74),
(75, 76),
(77, 85),
(86, 92),
(93, 95),
(96, 102),
(103, 109),
]
result = list(tokenizer.span_tokenize(test2))
assert result == expected
# Test case with double qoutation as well as converted quotations
test3 = "The DUP is similar to the \"religious right\" in the United States and takes a ``hardline'' stance on social issues"
expected = [
(0, 3),
(4, 7),
(8, 10),
(11, 18),
(19, 21),
(22, 25),
(26, 27),
(27, 36),
(37, 42),
(42, 43),
(44, 46),
(47, 50),
(51, 57),
(58, 64),
(65, 68),
(69, 74),
(75, 76),
(77, 79),
(79, 87),
(87, 89),
(90, 96),
(97, 99),
(100, 106),
(107, 113),
]
result = list(tokenizer.span_tokenize(test3))
assert result == expected
def test_word_tokenize(self):
"""
Test word_tokenize function
"""
sentence = "The 'v', I've been fooled but I'll seek revenge."
expected = ['The', "'", 'v', "'", ',', 'I', "'ve", 'been', 'fooled',
'but', 'I', "'ll", 'seek', 'revenge', '.']
assert word_tokenize(sentence) == expected
sentence = "'v' 're'"
expected = ["'", 'v', "'", "'re", "'"]
assert word_tokenize(sentence) == expected
def test_punkt_pair_iter(self):
test_cases = [
('12', [('1', '2'), ('2', None)]),
('123', [('1', '2'), ('2', '3'), ('3', None)]),
('1234', [('1', '2'), ('2', '3'), ('3', '4'), ('4', None)]),
]
for (test_input, expected_output) in test_cases:
actual_output = [x for x in punkt._pair_iter(test_input)]
assert actual_output == expected_output
def test_punkt_pair_iter_handles_stop_iteration_exception(self):
# test input to trigger StopIteration from next()
it = iter([])
# call method under test and produce a generator
gen = punkt._pair_iter(it)
# unpack generator, ensure that no error is raised
list(gen)
def test_punkt_tokenize_words_handles_stop_iteration_exception(self):
obj = punkt.PunktBaseClass()
class TestPunktTokenizeWordsMock:
def word_tokenize(self, s):
return iter([])
obj._lang_vars = TestPunktTokenizeWordsMock()
# unpack generator, ensure that no error is raised
list(obj._tokenize_words('test'))
def test_punkt_tokenize_custom_lang_vars(self):
# Create LangVars including a full stop end character as used in Bengali
class BengaliLanguageVars(punkt.PunktLanguageVars):
sent_end_chars = ('.', '?', '!', '\u0964')
obj = punkt.PunktSentenceTokenizer(lang_vars = BengaliLanguageVars())
# We now expect these sentences to be split up into the individual sentences
sentences = u"উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"
expected = ["উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন।", "অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন।", "এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"]
assert obj.tokenize(sentences) == expected
def test_punkt_tokenize_no_custom_lang_vars(self):
obj = punkt.PunktSentenceTokenizer()
# We expect these sentences to not be split properly, as the Bengali full stop '।' is not included in the default language vars
sentences = u"উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"
expected = ["উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"]
assert obj.tokenize(sentences) == expected
| 31.773784 | 348 | 0.467363 | [
"Apache-2.0"
] | Geolem/nltk | nltk/test/unit/test_tokenize.py | 17,387 | Python |
from kafka import KafkaConsumer
KAFKA_SERVER_URL = 'localhost:9092'
LOGIN = "bob"
PWD = "bob-secret"
TOPIC = "test-topic"
GROUP_ID = 'bob-group'
consumer = KafkaConsumer(TOPIC, group_id=GROUP_ID, bootstrap_servers=KAFKA_SERVER_URL,
security_protocol="SASL_PLAINTEXT",
sasl_mechanism='PLAIN', sasl_plain_username=LOGIN, sasl_plain_password=PWD)
for msg in consumer:
print(msg)
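# Illustrative variant (assumes kafka-python's ConsumerRecord fields): each
# `msg` exposes topic/partition/offset metadata plus raw bytes key/value, so a
# more explicit loop could look like the commented sketch below.
#
# for msg in consumer:
#     print(msg.topic, msg.partition, msg.offset,
#           msg.key, msg.value.decode("utf-8", errors="replace"))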
| 28.866667 | 100 | 0.69746 | [
"Apache-2.0"
] | pengfei99/KafkaPyClient | kafka-python/ConsumerAuth.py | 433 | Python |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detr.models.backbone import Backbone, Joiner
from detr.models.detr import DETR, PostProcess
from detr.models.position_encoding import PositionEmbeddingSine
from detr.models.segmentation import DETRsegm, PostProcessPanoptic
from detr.models.transformer import Transformer
dependencies = ["torch", "torchvision"]
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
hidden_dim = 256
backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
backbone_with_pos_enc = Joiner(backbone, pos_enc)
backbone_with_pos_enc.num_channels = backbone.num_channels
transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
if mask:
return DETRsegm(detr)
return detr
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 42/62.4 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 43.3/63.1 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
"""
    DETR R101 with 6 encoder and 6 decoder layers.
Achieves 43.5/63.8 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
The last block of ResNet-101 has dilation to increase
output resolution.
Achieves 44.9/64.7 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 43.4 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet50_dc5_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
    Achieves 44.6 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet101_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
    DETR R101 with 6 encoder and 6 decoder layers.
Achieves 45.1 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
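# --- Usage sketch (not part of the original hub entrypoints) ---
# A minimal, hedged example of calling one of the entrypoints above directly;
# the 800x1216 input shape and the target-size tensor are illustrative
# placeholders, and real images should be ImageNet-normalized beforehand.
if __name__ == "__main__":
    import torch
    model, postprocessor = detr_resnet50(pretrained=False, return_postprocessor=True)
    model.eval()
    dummy = [torch.rand(3, 800, 1216)]  # stand-in for one normalized image
    with torch.no_grad():
        outputs = model(dummy)  # dict with "pred_logits" and "pred_boxes"
    # PostProcess rescales predictions to absolute (height, width) image sizes.
    results = postprocessor(outputs, torch.tensor([[800, 1216]]))
    print(results[0]["scores"].shape)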
| 37.218935 | 117 | 0.71097 | ["Apache-2.0"] | justinkay/detr | detr/hubconf.py | 6,290 | Python
"""
'storage-add ' sub command
"""
# To prevent Py2 from interpreting print(val) as a tuple.
from __future__ import print_function
import os
import tempfile
import sys
import json
import utils
from storage_yaml import to_storage_yaml
# noqa # pylint: disable=too-many-branches
def set_args(name, subparsers):
""" add arguments, and their options """
parser = subparsers.add_parser(name)
arg = parser.add_argument
arg(
"name",
help="Storage Name"
)
arg(
"--type",
help="Storage Type",
choices=["Replica1", "Replica3", "External", "Replica2"],
default=None
)
arg(
"--device",
help=("Storage device in <node>:<device> format, "
"Example: --device kube1.example.com:/dev/vdc"),
default=[],
action="append"
)
arg(
"--path",
help=("Storage path in <node>:<path> format, "
"Example: --path kube1.example.com:/exports/data"),
default=[],
action="append"
)
arg(
"--pvc",
help="Storage from pvc, Example: --pvc local-pvc-1",
default=[],
action="append"
)
arg(
"--external",
help="Storage from external gluster, Example: --external gluster-node:/gluster-volname",
default=None
)
arg(
"--tiebreaker",
help="If type is 'Replica2', one can have a tiebreaker node along "
"with it. like '--tiebreaker tie-breaker-node-name:/data/tiebreaker'",
default=None
)
utils.add_global_flags(parser)
def validate(args):
""" validate arguments """
if args.external is not None:
if args.type and args.type != "External":
print("'--external' option is used only with '--type External'",
file=sys.stderr)
sys.exit(1)
if ":" not in args.external:
print("Invalid external storage details. Please specify "
"details in the format <node>:/<volname>", file=sys.stderr)
sys.exit(1)
# Set type to External as '--external' option is provided
args.type = "External"
if args.tiebreaker:
if args.type != "Replica2":
print("'--tiebreaker' option should be used only with "
"type 'Replica2'", file=sys.stderr)
sys.exit(1)
if ":" not in args.tiebreaker:
print("Invalid tiebreaker details. Please specify details "
"in the format <node>:/<path>", file=sys.stderr)
sys.exit(1)
else:
args.tiebreaker = "tie-breaker.kadalu.io:/mnt"
if not args.type:
args.type = "Replica1"
num_storages = (len(args.device) + len(args.path) + len(args.pvc)) or \
(1 if args.external is not None else 0)
if num_storages == 0:
print("Please specify at least one storage", file=sys.stderr)
sys.exit(1)
# pylint: disable=too-many-boolean-expressions
if ((args.type == "Replica1" and num_storages != 1) or
(args.type == "Replica2" and num_storages != 2) or
(args.type == "Replica3" and num_storages != 3)):
print("Number of storages not matching for type=%s" % args.type,
file=sys.stderr)
sys.exit(1)
kube_nodes = get_kube_nodes(args)
for dev in args.device:
if ":" not in dev:
print("Invalid storage device details. Please specify device "
"details in the format <node>:<device>", file=sys.stderr)
sys.exit(1)
if (not args.dry_run) and (dev.split(":")[0] not in kube_nodes):
print("Node name does not appear to be valid: " + dev)
sys.exit(1)
for path in args.path:
if ":" not in path:
print("Invalid storage path details. Please specify path "
"details in the format <node>:<path>", file=sys.stderr)
sys.exit(1)
if (not args.dry_run) and (path.split(":")[0] not in kube_nodes):
print("Node name does not appear to be valid: " + path)
sys.exit(1)
def get_kube_nodes(args):
""" gets all nodes """
if args.dry_run:
return []
cmd = utils.kubectl_cmd(args) + ["get", "nodes", "-ojson"]
try:
resp = utils.execute(cmd)
data = json.loads(resp.stdout)
nodes = []
for nodedata in data["items"]:
nodes.append(nodedata["metadata"]["name"])
print("The following nodes are available:\n %s" % ", ".join(nodes))
print()
return nodes
except utils.CommandError as err:
utils.command_error(cmd, err.stderr)
except FileNotFoundError:
utils.kubectl_cmd_help(args.kubectl_cmd)
def storage_add_data(args):
""" Build the config file """
content = {
"apiVersion": "kadalu-operator.storage/v1alpha1",
"kind": "KadaluStorage",
"metadata": {
"name": args.name
},
"spec": {
"type": args.type,
"storage": []
}
}
# External details are specified, no 'storage' section required
if args.external:
node, vol = args.external.split(":")
content["spec"]["details"] = [
{
"gluster_host": node,
"gluster_volname": vol.strip("/")
}
]
return content
# Everything below can be provided for a 'Replica3' setup.
# Or two types of data can be provided for 'Replica2'.
# So, return only at the end.
# Device details are specified
if args.device:
for devdata in args.device:
node, dev = devdata.split(":")
content["spec"]["storage"].append(
{
"node": node,
"device": dev
}
)
# If Path is specified
if args.path:
for pathdata in args.path:
node, path = pathdata.split(":")
content["spec"]["storage"].append(
{
"node": node,
"path": path
}
)
# If PVC is specified
if args.pvc:
for pvc in args.pvc:
content["spec"]["storage"].append(
{
"pvc": pvc
}
)
# TODO: Support for different port can be added later
if args.type == "Replica2":
node, path = args.tiebreaker.split(":")
content["spec"]["tiebreaker"] = {
"node": node,
"path": path,
"port": 24007
}
return content
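# Hedged illustration of the dict built above for a Replica1 device-backed pool;
# the pool, node and device names are made-up examples.
#   args: name="storage-pool-1", type="Replica1",
#         device=["kube1.example.com:/dev/vdc"], path=[], pvc=[], external=None
#   result (before to_storage_yaml conversion):
#   {
#       "apiVersion": "kadalu-operator.storage/v1alpha1",
#       "kind": "KadaluStorage",
#       "metadata": {"name": "storage-pool-1"},
#       "spec": {
#           "type": "Replica1",
#           "storage": [{"node": "kube1.example.com", "device": "/dev/vdc"}]
#       }
#   }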
def run(args):
""" Adds the subcommand arguments back to main CLI tool """
data = storage_add_data(args)
yaml_content = to_storage_yaml(data)
print("Storage Yaml file for your reference:\n")
print(yaml_content)
if args.dry_run:
return
if not args.script_mode:
answer = ""
valid_answers = ["yes", "no", "n", "y"]
while answer not in valid_answers:
answer = input("Is this correct?(Yes/No): ")
answer = answer.strip().lower()
if answer in ["n", "no"]:
return
config, tempfile_path = tempfile.mkstemp(prefix="kadalu")
try:
with os.fdopen(config, 'w') as tmp:
tmp.write(yaml_content)
cmd = utils.kubectl_cmd(args) + ["create", "-f", tempfile_path]
resp = utils.execute(cmd)
print("Storage add request sent successfully")
print(resp.stdout)
print()
except utils.CommandError as err:
os.remove(tempfile_path)
utils.command_error(cmd, err.stderr)
except FileNotFoundError:
os.remove(tempfile_path)
utils.kubectl_cmd_help(args.kubectl_cmd)
finally:
if os.path.exists(tempfile_path):
os.remove(tempfile_path)
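# Hedged CLI sketch (pool name, node and device below are illustrative only):
#   kubectl kadalu storage-add storage-pool-1 --device kube1.example.com:/dev/vdc
# run() prints the generated YAML, asks for confirmation unless script mode is
# enabled, and then applies it with `kubectl create -f <tempfile>`.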
| 29.315985 | 96 | 0.547172 | ["Apache-2.0"] | Joibel/kadalu | cli/kubectl_kadalu/storage_add.py | 7,886 | Python
from sun.geometry import Location, SunPath
from date_time import Date
loc = Location(
name='Ghent',
region='Belgium',
latitude=51.07,
longitude=3.69,
timezone='Europe/Brussels',
altitude=9.0
)
date = Date(year=2019, month=7, day=29)
sp = SunPath(loc, date)
sp.print_table()
| 19.933333 | 42 | 0.682274 | ["MIT"] | TomLXXVI/pypv | pypv/scripts/sun_path.py | 299 | Python
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
r"""
This module contains the abstract base classes for defining PennyLane
operations and observables.
Description
-----------
Qubit Operations
~~~~~~~~~~~~~~~~
The :class:`Operator` class serves as a base class for operators,
and is inherited by both the :class:`Observable` class and the
:class:`Operation` class. These classes are subclassed to implement quantum operations
and measure observables in PennyLane.
* Each :class:`~.Operator` subclass represents a general type of
map between physical states. Each instance of these subclasses
represents either
- an application of the operator or
- an instruction to measure and return the respective result.
Operators act on a sequence of wires (subsystems) using given parameter values.
* Each :class:`~.Operation` subclass represents a type of quantum operation,
for example a unitary quantum gate. Each instance of these subclasses
represents an application of the operation with given parameter values to
a given sequence of wires (subsystems).
* Each :class:`~.Observable` subclass represents a type of physical observable.
Each instance of these subclasses represents an instruction to measure and
return the respective result for the given parameter values on a
sequence of wires (subsystems).
Differentiation
^^^^^^^^^^^^^^^
In general, an :class:`Operation` is differentiable (at least using the finite-difference
method) with respect to a parameter iff
* the domain of that parameter is continuous.
For an :class:`Operation` to be differentiable with respect to a parameter using the
analytic method of differentiation, it must satisfy an additional constraint:
* the parameter domain must be real.
.. note::
These conditions are *not* sufficient for analytic differentiation. For example,
CV gates must also define a matrix representing their Heisenberg linear
transformation on the quadrature operators.
For gates that *are* supported via the analytic method, the gradient recipe
works as follows:
.. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k+s_i).
where :math:`f` is the expectation value of an observable on a circuit that has
been evolved by the operation being considered with parameter :math:`\phi_k`,
there are multiple terms indexed with :math:`i` for each parameter :math:`\phi`
and the :math:`[c_i, a_i, s_i]` are coefficients specific to the gate.
The following specific case holds for example for qubit operations that are
generated by one of the Pauli matrices and results in an overall positive and
negative shift:
.. math::
\frac{\partial}{\partial\phi_k}f = \frac{1}{2}\left[f \left( \phi_k+\frac{\pi}{2} \right) - f
\left( \phi_k-\frac{\pi}{2} \right)\right],
i.e., so that :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]`.
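As a concrete illustration (a minimal sketch, assuming the ``default.qubit`` device
and the two-term recipe above), the rule can be checked by hand:

.. code-block:: python

    import pennylane as qml
    from pennylane import numpy as np

    dev = qml.device("default.qubit", wires=1)

    @qml.qnode(dev)
    def circuit(phi):
        qml.RX(phi, wires=0)
        return qml.expval(qml.PauliZ(0))

    phi, s = 0.3, np.pi / 2
    # two-term parameter-shift estimate of d<Z>/dphi; analytically -sin(phi)
    grad = 0.5 * (circuit(phi + s) - circuit(phi - s))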
CV Operation base classes
~~~~~~~~~~~~~~~~~~~~~~~~~
Due to additional requirements, continuous-variable (CV) operations must subclass the
:class:`~.CVOperation` or :class:`~.CVObservable` classes instead of :class:`~.Operation`
and :class:`~.Observable`.
Differentiation
^^^^^^^^^^^^^^^
To enable gradient computation using the analytic method for Gaussian CV operations, in addition, you need to
provide the static class method :meth:`~.CV._heisenberg_rep` that returns the Heisenberg representation of
the operation given its list of parameters, namely:
* For Gaussian CV Operations this method should return the matrix of the linear transformation carried out by the
operation on the vector of quadrature operators :math:`\mathbf{r}` for the given parameter
values.
* For Gaussian CV Observables this method should return a real vector (first-order observables)
or symmetric matrix (second-order observables) of coefficients of the quadrature
operators :math:`\x` and :math:`\p`.
PennyLane uses the convention :math:`\mathbf{r} = (\I, \x, \p)` for single-mode operations and observables
and :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)` for multi-mode operations and observables.
.. note::
Non-Gaussian CV operations and observables are currently only supported via
the finite-difference method of gradient computation.
"""
import abc
import copy
import itertools
import functools
import numbers
from enum import Enum, IntEnum
import numpy as np
from numpy.linalg import multi_dot
import pennylane as qml
from pennylane.wires import Wires
from .utils import pauli_eigs
from .variable import Variable
# =============================================================================
# Wire types
# =============================================================================
class WiresEnum(IntEnum):
"""Integer enumeration class
to represent the number of wires
an operation acts on"""
AnyWires = -1
AllWires = 0
AllWires = WiresEnum.AllWires
"""IntEnum: An enumeration which represents all wires in the
subsystem. It is equivalent to an integer with value 0."""
AnyWires = WiresEnum.AnyWires
"""IntEnum: An enumeration which represents any wires in the
subsystem. It is equivalent to an integer with value -1."""
# =============================================================================
# ObservableReturnTypes types
# =============================================================================
class ObservableReturnTypes(Enum):
"""Enumeration class to represent the return types of an observable."""
Sample = "sample"
Variance = "var"
Expectation = "expval"
Probability = "probs"
State = "state"
def __repr__(self):
"""String representation of the return types."""
return str(self.value)
Sample = ObservableReturnTypes.Sample
"""Enum: An enumeration which represents sampling an observable."""
Variance = ObservableReturnTypes.Variance
"""Enum: An enumeration which represents returning the variance of
an observable on specified wires."""
Expectation = ObservableReturnTypes.Expectation
"""Enum: An enumeration which represents returning the expectation
value of an observable on specified wires."""
Probability = ObservableReturnTypes.Probability
"""Enum: An enumeration which represents returning probabilities
of all computational basis states."""
State = ObservableReturnTypes.State
"""Enum: An enumeration which represents returning the state in the computational basis."""
# =============================================================================
# Class property
# =============================================================================
class ClassPropertyDescriptor: # pragma: no cover
"""Allows a class property to be defined"""
# pylint: disable=too-few-public-methods
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset:
raise AttributeError("can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
"""Set the function as a class method, and store as an attribute."""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
"""The class property decorator"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
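# Hedged usage sketch for the decorator above (``Gate`` is an illustrative name):
#   class Gate:
#       @classproperty
#       def num_wires(cls):
#           return 1
#   Gate.num_wires  # -> 1, readable on the class itself as well as on instances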
# =============================================================================
# Base Operator class
# =============================================================================
class Operator(abc.ABC):
r"""Base class for quantum operators supported by a device.
The following class attributes must be defined for all Operators:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
Args:
params (tuple[float, int, array, Variable]): operator parameters
Keyword Args:
wires (Iterable[Number, str], Number, str, Wires): Wires that the operator acts on.
If not given, args[-1] is interpreted as wires.
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue.
"""
do_check_domain = True #: bool: flag: should we perform a domain check for the parameters?
def __copy__(self):
cls = self.__class__
copied_op = cls.__new__(cls)
copied_op.data = self.data.copy()
copied_op._wires = self.wires
copied_op._name = self._name
if hasattr(self, "_inverse"):
copied_op._inverse = self._inverse
return copied_op
def __deepcopy__(self, memo):
cls = self.__class__
copied_op = cls.__new__(cls)
# The memo dict maps object ID to object, and is required by
# the deepcopy function to keep track of objects it has already
# deep copied.
memo[id(self)] = copied_op
for attribute, value in self.__dict__.items():
if attribute == "data":
# Shallow copy the list of parameters. We avoid a deep copy
# here, since PyTorch does not support deep copying of tensors
# within a differentiable computation.
copied_op.data = value.copy()
else:
# Deep copy everything else.
setattr(copied_op, attribute, copy.deepcopy(value, memo))
return copied_op
@classmethod
def _matrix(cls, *params):
"""Matrix representation of the operator
in the computational basis.
This is a *class method* that should be defined for all
new operations and observables, that returns the matrix representing
the operator in the computational basis.
This private method allows matrices to be computed
directly without instantiating the operators first.
To return the matrices of *instantiated* operators,
please use the :attr:`~.Operator.matrix` property instead.
**Example:**
>>> qml.RY._matrix(0.5)
>>> array([[ 0.96891242+0.j, -0.24740396+0.j],
[ 0.24740396+0.j, 0.96891242+0.j]])
Returns:
array: matrix representation
"""
raise NotImplementedError
@property
def matrix(self):
r"""Matrix representation of an instantiated operator
in the computational basis.
**Example:**
>>> U = qml.RY(0.5, wires=1)
>>> U.matrix
>>> array([[ 0.96891242+0.j, -0.24740396+0.j],
[ 0.24740396+0.j, 0.96891242+0.j]])
Returns:
array: matrix representation
"""
return self._matrix(*self.parameters)
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the operator.
This is a *class method* that should be defined for all
new operations and observables that returns the eigenvalues
of the operator. Note that the eigenvalues are not guaranteed
to be in any particular order.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
The default implementation relies on the presence of the
:attr:`_matrix` method.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
"""
return np.linalg.eigvals(cls._matrix(*params))
@property
def eigvals(self):
r"""Eigenvalues of an instantiated operator.
Note that the eigenvalues are not guaranteed to be in any
particular order.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
"""
return self._eigvals(*self.parameters)
@property
@abc.abstractmethod
def num_params(self):
"""Number of parameters the operator takes."""
@property
@abc.abstractmethod
def num_wires(self):
"""Number of wires the operator acts on."""
@property
@abc.abstractmethod
def par_domain(self):
"""Domain of the gate parameters.
* ``'N'``: natural numbers (including zero).
* ``'R'``: floats.
* ``'A'``: arrays of real or complex values.
* ``'L'``: list of arrays of real or complex values.
* ``None``: if there are no parameters.
"""
@property
def name(self):
"""String for the name of the operator."""
return self._name
@name.setter
def name(self, value):
self._name = value
def __init__(self, *params, wires=None, do_queue=True):
# pylint: disable=too-many-branches
self._name = self.__class__.__name__ #: str: name of the operator
self.queue_idx = None #: int, None: index of the Operator in the circuit queue, or None if not in a queue
if wires is None:
raise ValueError("Must specify the wires that {} acts on".format(self.name))
if isinstance(wires, Wires):
self._wires = wires
else:
self._wires = Wires(wires) #: Wires: wires on which the operator acts
# check that the number of wires given corresponds to required number
if (
self.num_wires != AllWires
and self.num_wires != AnyWires
and len(self._wires) != self.num_wires
):
raise ValueError(
"{}: wrong number of wires. "
"{} wires given, {} expected.".format(self.name, len(self._wires), self.num_wires)
)
if len(params) != self.num_params:
raise ValueError(
"{}: wrong number of parameters. "
"{} parameters passed, {} expected.".format(self.name, len(params), self.num_params)
)
# check the validity of the params
if self.do_check_domain:
for p in params:
self.check_domain(p)
self.data = list(params) #: list[Any]: parameters of the operator
if do_queue:
self.queue()
def __repr__(self):
"""Constructor-call-like representation."""
# FIXME using self.parameters here instead of self.data is dangerous, it assumes the data can be evaluated
# which is only true if something suitable happens to remain in VariableRef.positional_arg_values etc. after
# the last evaluation.
if self.parameters:
params = ", ".join([repr(p) for p in self.parameters])
return "{}({}, wires={})".format(self.name, params, self.wires.tolist())
return "{}(wires={})".format(self.name, self.wires.tolist())
def check_domain(self, p, flattened=False):
"""Check the validity of a parameter.
:class:`.Variable` instances can represent any real scalars (but not arrays).
Args:
p (Number, array, Variable): parameter to check
flattened (bool): True means p is an element of a flattened parameter
sequence (affects the handling of 'A' parameters)
Raises:
TypeError: parameter is not an element of the expected domain
ValueError: parameter is an element of an unknown domain
Returns:
Number, array, Variable: p
"""
# pylint: disable=too-many-branches
# If parameter is a NumPy scalar, convert it into a Python scalar.
if isinstance(p, np.ndarray) and p.ndim == 0:
p = p.item()
if isinstance(p, Variable):
if self.par_domain == "A":
raise TypeError(
"{}: Array parameter expected, got a Variable, "
"which can only represent real scalars.".format(self.name)
)
return p
# p is not a Variable
if self.par_domain == "A":
if flattened:
if isinstance(p, np.ndarray):
raise TypeError(
"{}: Flattened array parameter expected, got {}.".format(self.name, type(p))
)
else:
if not isinstance(p, np.ndarray):
raise TypeError(
"{}: Array parameter expected, got {}.".format(self.name, type(p))
)
elif self.par_domain in ("R", "N"):
if not isinstance(p, numbers.Real):
raise TypeError(
"{}: Real scalar parameter expected, got {}.".format(self.name, type(p))
)
if self.par_domain == "N":
if not isinstance(p, numbers.Integral):
raise TypeError(
"{}: Natural number parameter expected, got {}.".format(self.name, type(p))
)
if p < 0:
raise TypeError(
"{}: Natural number parameter expected, got {}.".format(self.name, p)
)
elif self.par_domain == "L":
if not isinstance(p, list):
raise TypeError("{}: List parameter expected, got {}.".format(self.name, type(p)))
if not all(isinstance(elem, np.ndarray) for elem in p):
raise TypeError("List elements must be Numpy arrays.")
else:
raise ValueError(
"{}: Unknown parameter domain '{}'.".format(self.name, self.par_domain)
)
return p
@property
def wires(self):
"""Wires of this operator.
Returns:
Wires: wires
"""
return self._wires
@property
def parameters(self):
"""Current parameter values.
Fixed parameters are returned as is, free parameters represented by
:class:`.Variable` instances are replaced by their
current numerical value.
Returns:
list[Any]: parameter values
"""
# TODO profiling
def evaluate(p):
"""Evaluate a single parameter."""
if isinstance(p, np.ndarray):
# object arrays may have Variables inside them
if p.dtype == object:
temp = np.array([x.val if isinstance(x, Variable) else x for x in p.flat])
return temp.reshape(p.shape)
return p
if isinstance(p, list):
# p is assumed to be a list of numpy arrays
# object arrays may have Variables inside them
evaled_list = []
for arr in p:
if arr.dtype == object:
temp = np.array([x.val if isinstance(x, Variable) else x for x in arr.flat])
evaled_list.append(temp.reshape(arr.shape))
return evaled_list
return p
if isinstance(p, Variable):
p = self.check_domain(p.val)
return p
return [evaluate(p) for p in self.data]
def queue(self):
"""Append the operator to the Operator queue."""
qml.QueuingContext.append(self)
return self # so pre-constructed Observable instances can be queued and returned in a single statement
# =============================================================================
# Base Operation class
# =============================================================================
class Operation(Operator):
r"""Base class for quantum operations supported by a device.
As with :class:`~.Operator`, the following class attributes must be
defined for all operations:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
The following two class attributes are optional, but in most cases
should be clearly defined to avoid unexpected behavior during
differentiation.
* :attr:`~.Operation.grad_method`
* :attr:`~.Operation.grad_recipe`
Finally, there are some additional optional class attributes
that may be set, and used by certain quantum optimizers:
* :attr:`~.Operation.generator`
Args:
params (tuple[float, int, array, Variable]): operation parameters
Keyword Args:
wires (Sequence[int]): Subsystems it acts on. If not given, args[-1]
is interpreted as wires.
do_queue (bool): Indicates whether the operation should be
immediately pushed into a :class:`BaseQNode` circuit queue.
This flag is useful if there is some reason to run an Operation
outside of a BaseQNode context.
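    **Example:**

    A minimal sketch of a custom gate (``MyRotation`` is illustrative only and
    does not ship with PennyLane) declares the three required class attributes:

    .. code-block:: python

        class MyRotation(qml.operation.Operation):
            num_params = 1
            num_wires = 1
            par_domain = "R"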
"""
# pylint: disable=abstract-method
string_for_inverse = ".inv"
@property
def grad_method(self):
"""Gradient computation method.
* ``'A'``: analytic differentiation using the parameter-shift method.
* ``'F'``: finite difference numerical differentiation.
* ``None``: the operation may not be differentiated.
Default is ``'F'``, or ``None`` if the Operation has zero parameters.
"""
return None if self.num_params == 0 else "F"
grad_recipe = None
r"""tuple(Union(list[list[float]], None)) or None: Gradient recipe for the
parameter-shift method.
This is a tuple with one nested list per operation parameter. For
parameter :math:`\phi_k`, the nested list contains elements of the form
:math:`[c_i, a_i, s_i]` where :math:`i` is the index of the
term, resulting in a gradient recipe of
.. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k + s_i).
If ``None``, the default gradient recipe containing the two terms
:math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1,
s_1]=[-1/2, 1, -\pi/2]` is assumed for every parameter.
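    For a one-parameter gate, spelling out this default explicitly would read
    (an illustrative sketch, with ``np`` denoting NumPy):

    .. code-block:: python

        grad_recipe = ([[0.5, 1.0, np.pi / 2], [-0.5, 1.0, -np.pi / 2]],)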
"""
def get_parameter_shift(self, idx, shift=np.pi / 2):
"""Multiplier and shift for the given parameter, based on its gradient recipe.
Args:
idx (int): parameter index
Returns:
float, float: multiplier, shift
"""
# get the gradient recipe for this parameter
recipe = self.grad_recipe[idx]
# Default values
multiplier = 0.5 / np.sin(shift)
a = 1
        # We set the default recipe as follows:
# ∂f(x) = c*f(x+s) - c*f(x-s)
# where we express a positive and a negative shift by default
default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]
param_shift = default_param_shift if recipe is None else recipe
if hasattr(self.data[idx], "mult"):
# Parameter is a variable, we are in non-tape mode
# Need to use the internal multiplier in the Variable to update the
# multiplier and the shift
var_mult = self.data[idx].mult
for elem in param_shift:
# Update the multiplier
elem[0] *= var_mult
if var_mult != 0:
# Update the shift
# zero multiplier means the shift is unimportant
elem[2] /= var_mult
return param_shift
@property
def generator(self):
r"""Generator of the operation.
A length-2 list ``[generator, scaling_factor]``, where
* ``generator`` is an existing PennyLane
operation class or :math:`2\times 2` Hermitian array
that acts as the generator of the current operation
* ``scaling_factor`` represents a scaling factor applied
to the generator operation
For example, if :math:`U(\theta)=e^{i0.7\theta \sigma_x}`, then
        :math:`\sigma_x`, with scaling factor :math:`0.7`, is the generator
of operator :math:`U(\theta)`:
.. code-block:: python
generator = [PauliX, 0.7]
Default is ``[None, 1]``, indicating the operation has no generator.
"""
return [None, 1]
@property
def inverse(self):
"""Boolean determining if the inverse of the operation was requested."""
return self._inverse
@inverse.setter
def inverse(self, boolean):
self._inverse = boolean
@staticmethod
def decomposition(*params, wires):
"""Returns a template decomposing the operation into other
quantum operations."""
raise NotImplementedError
def inv(self):
"""Inverts the operation, such that the inverse will
be used for the computations by the specific device.
This method concatenates a string to the name of the operation,
to indicate that the inverse will be used for computations.
Any subsequent call of this method will toggle between the original
operation and the inverse of the operation.
Returns:
:class:`Operator`: operation to be inverted
"""
self.inverse = not self._inverse
return self
@property
def matrix(self):
op_matrix = self._matrix(*self.parameters)
if self.inverse:
return op_matrix.conj().T
return op_matrix
@property
def eigvals(self):
op_eigvals = self._eigvals(*self.parameters)
if self.inverse:
return op_eigvals.conj()
return op_eigvals
@property
def base_name(self):
"""Get base name of the operator."""
return self.__class__.__name__
@property
def name(self):
"""Get and set the name of the operator."""
return self._name + Operation.string_for_inverse if self.inverse else self._name
def __init__(self, *params, wires=None, do_queue=True):
self._inverse = False
# check the grad_method validity
if self.par_domain == "N":
assert (
self.grad_method is None
), "An operation may only be differentiated with respect to real scalar parameters."
elif self.par_domain == "A":
assert self.grad_method in (
None,
"F",
), "Operations that depend on arrays containing free variables may only be differentiated using the F method."
# check the grad_recipe validity
if self.grad_method == "A":
if self.grad_recipe is None:
# default recipe for every parameter
self.grad_recipe = [None] * self.num_params
else:
assert (
len(self.grad_recipe) == self.num_params
), "Gradient recipe must have one entry for each parameter!"
else:
assert self.grad_recipe is None, "Gradient recipe is only used by the A method!"
super().__init__(*params, wires=wires, do_queue=do_queue)
class DiagonalOperation(Operation):
r"""Base class for diagonal quantum operations supported by a device.
As with :class:`~.Operation`, the following class attributes must be
defined for all operations:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
The following two class attributes are optional, but in most cases
should be clearly defined to avoid unexpected behavior during
differentiation.
* :attr:`~.Operation.grad_method`
* :attr:`~.Operation.grad_recipe`
Finally, there are some additional optional class attributes
that may be set, and used by certain quantum optimizers:
* :attr:`~.Operation.generator`
Args:
params (tuple[float, int, array, Variable]): operation parameters
Keyword Args:
wires (Sequence[int]): Subsystems it acts on. If not given, args[-1]
is interpreted as wires.
do_queue (bool): Indicates whether the operation should be
immediately pushed into a :class:`BaseQNode` circuit queue.
This flag is useful if there is some reason to run an Operation
outside of a BaseQNode context.
"""
# pylint: disable=abstract-method
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the operator.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
"""
raise NotImplementedError
@property
def eigvals(self):
r"""Eigenvalues of an instantiated diagonal operation.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
"""
return super().eigvals
@classmethod
def _matrix(cls, *params):
return np.diag(cls._eigvals(*params))
class Channel(Operation, abc.ABC):
r"""Base class for quantum channels.
As with :class:`~.Operation`, the following class attributes must be
defined for all channels:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
To define a noisy channel, the following attribute of :class:`~.Channel`
can be used to list the corresponding Kraus matrices.
* :attr:`~.Channel._kraus_matrices`
The following two class attributes are optional, but in most cases
should be clearly defined to avoid unexpected behavior during
differentiation.
* :attr:`~.Operation.grad_method`
* :attr:`~.Operation.grad_recipe`
Args:
params (tuple[float, int, array, Variable]): operation parameters
Keyword Args:
wires (Sequence[int]): Subsystems the channel acts on. If not given, args[-1]
is interpreted as wires.
do_queue (bool): Indicates whether the operation should be
immediately pushed into a :class:`BaseQNode` circuit queue.
This flag is useful if there is some reason to run an Operation
outside of a BaseQNode context.
"""
# pylint: disable=abstract-method
@classmethod
@abc.abstractmethod
def _kraus_matrices(cls, *params):
"""Kraus matrices representing a quantum channel, specified in
the computational basis.
This is a class method that should be defined for all
new channels. It returns the Kraus matrices representing
the channel in the computational basis.
This private method allows matrices to be computed
directly without instantiating the channel first.
**Example**
>>> qml.AmplitudeDamping._kraus_matrices(0.1)
>>> [array([[1. , 0. ],
[0. , 0.9486833]]), array([[0. , 0.31622777],
[0. , 0. ]])]
To return the Kraus matrices of an *instantiated* channel,
please use the :attr:`~.Operator.kraus_matrices` property instead.
Returns:
list(array): list of Kraus matrices
"""
raise NotImplementedError
@property
def kraus_matrices(self):
r"""Kraus matrices of an instantiated channel
in the computational basis.
        **Example**
>>> U = qml.AmplitudeDamping(0.1, wires=1)
>>> U.kraus_matrices
>>> [array([[1. , 0. ],
[0. , 0.9486833]]), array([[0. , 0.31622777],
[0. , 0. ]])]
Returns:
list(array): list of Kraus matrices
"""
return self._kraus_matrices(*self.parameters)
# =============================================================================
# Base Observable class
# =============================================================================
class Observable(Operator):
"""Base class for observables supported by a device.
:class:`Observable` is used to describe Hermitian quantum observables.
As with :class:`~.Operator`, the following class attributes must be
defined for all observables:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
Args:
params (tuple[float, int, array, Variable]): observable parameters
Keyword Args:
wires (Sequence[int]): subsystems it acts on.
Currently, only one subsystem is supported.
do_queue (bool): Indicates whether the operation should be
immediately pushed into the Operator queue.
"""
# pylint: disable=abstract-method
return_type = None
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the observable.
The order of the eigenvalues needs to match the order of
the computational basis vectors when the observable is
diagonalized using :attr:`diagonalizing_gates`.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.PauliZ._eigvals()
>>> array([1, -1])
Returns:
array: eigenvalue representation
"""
raise NotImplementedError
@property
def eigvals(self):
r"""Eigenvalues of an instantiated observable.
The order of the eigenvalues needs to match the order of
the computational basis vectors when the observable is
diagonalized using :attr:`diagonalizing_gates`. This is a
requirement for using qubit observables in quantum functions.
**Example:**
>>> U = qml.PauliZ(wires=1)
>>> U.eigvals
>>> array([1, -1])
Returns:
array: eigvals representation
"""
return super().eigvals
def __init__(self, *params, wires=None, do_queue=True):
# extract the arguments
if wires is None:
wires = params[-1]
params = params[:-1]
super().__init__(*params, wires=wires, do_queue=do_queue)
def __repr__(self):
"""Constructor-call-like representation."""
temp = super().__repr__()
if self.return_type is None:
return temp
if self.return_type is Probability:
return repr(self.return_type) + "(wires={})".format(self.wires.tolist())
return repr(self.return_type) + "(" + temp + ")"
def __matmul__(self, other):
if isinstance(other, Tensor):
return other.__rmatmul__(self)
if isinstance(other, Observable):
return Tensor(self, other)
raise ValueError("Can only perform tensor products between observables.")
def _obs_data(self):
r"""Extracts the data from a Observable or Tensor and serializes it in an order-independent fashion.
This allows for comparison between observables that are equivalent, but are expressed
in different orders. For example, `qml.PauliX(0) @ qml.PauliZ(1)` and
`qml.PauliZ(1) @ qml.PauliX(0)` are equivalent observables with different orderings.
**Example**
>>> tensor = qml.PauliX(0) @ qml.PauliZ(1)
>>> print(tensor._obs_data())
{("PauliZ", <Wires = [1]>, ()), ("PauliX", <Wires = [0]>, ())}
"""
obs = Tensor(self).non_identity_obs
tensor = set()
for ob in obs:
parameters = tuple(param.tostring() for param in ob.parameters)
tensor.add((ob.name, ob.wires, parameters))
return tensor
def compare(self, other):
r"""Compares with another :class:`~.Hamiltonian`, :class:`~Tensor`, or :class:`~Observable`,
to determine if they are equivalent.
Observables/Hamiltonians are equivalent if they represent the same operator
(their matrix representations are equal), and they are defined on the same wires.
.. Warning::
The compare method does **not** check if the matrix representation
of a :class:`~.Hermitian` observable is equal to an equivalent
observable expressed in terms of Pauli matrices.
To do so would require the matrix form of Hamiltonians and Tensors
be calculated, which would drastically increase runtime.
Returns:
(bool): True if equivalent.
**Examples**
>>> ob1 = qml.PauliX(0) @ qml.Identity(1)
>>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)])
>>> ob1.compare(ob2)
True
>>> ob1 = qml.PauliX(0)
>>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0)
>>> ob1.compare(ob2)
False
"""
if isinstance(other, (Tensor, Observable)):
return other._obs_data() == self._obs_data()
if isinstance(other, qml.Hamiltonian):
return other.compare(self)
raise ValueError(
"Can only compare an Observable/Tensor, and a Hamiltonian/Observable/Tensor."
)
def __add__(self, other):
r"""The addition operation between Observables/Tensors/qml.Hamiltonian objects."""
if isinstance(other, (Observable, Tensor)):
return qml.Hamiltonian([1, 1], [self, other], simplify=True)
if isinstance(other, qml.Hamiltonian):
return other + self
raise ValueError(f"Cannot add Observable and {type(other)}")
def __mul__(self, a):
r"""The scalar multiplication operation between a scalar and an Observable/Tensor."""
if isinstance(a, (int, float)):
return qml.Hamiltonian([a], [self], simplify=True)
raise ValueError(f"Cannot multiply Observable by {type(a)}")
__rmul__ = __mul__
def __sub__(self, other):
r"""The subtraction operation between Observables/Tensors/qml.Hamiltonian objects."""
if isinstance(other, (Observable, Tensor, qml.Hamiltonian)):
return self.__add__(other.__mul__(-1))
raise ValueError(f"Cannot subtract {type(other)} from Observable")
def diagonalizing_gates(self):
r"""Returns the list of operations such that they
diagonalize the observable in the computational basis.
Returns:
list(qml.Operation): A list of gates that diagonalize
the observable in the computational basis.
"""
raise NotImplementedError
class Tensor(Observable):
"""Container class representing tensor products of observables.
    To create a tensor, simply instantiate it like so:
>>> T = Tensor(qml.PauliX(0), qml.Hermitian(A, [1, 2]))
You can also create a tensor from other Tensors:
>>> T = Tensor(T, qml.PauliZ(4))
The ``@`` symbol can be used as a tensor product operation:
>>> T = qml.PauliX(0) @ qml.Hadamard(2)
"""
# pylint: disable=abstract-method
return_type = None
tensor = True
par_domain = None
def __init__(self, *args): # pylint: disable=super-init-not-called
self._eigvals_cache = None
self.obs = []
for o in args:
if isinstance(o, Tensor):
self.obs.extend(o.obs)
elif isinstance(o, Observable):
self.obs.append(o)
else:
raise ValueError("Can only perform tensor products between observables.")
def __copy__(self):
cls = self.__class__
copied_op = cls.__new__(cls)
copied_op.obs = self.obs.copy()
copied_op._eigvals_cache = self._eigvals_cache
return copied_op
def __repr__(self):
"""Constructor-call-like representation."""
s = " @ ".join([repr(o) for o in self.obs])
if self.return_type is None:
return s
if self.return_type is Probability:
return repr(self.return_type) + "(wires={})".format(self.wires.tolist())
return repr(self.return_type) + "(" + s + ")"
@property
def name(self):
"""All constituent observable names making up the tensor product.
Returns:
list[str]: list containing all observable names
"""
return [o.name for o in self.obs]
@property
def num_wires(self):
"""Number of wires the tensor product acts on.
Returns:
int: number of wires
"""
return len(self.wires)
@property
def wires(self):
"""All wires in the system the tensor product acts on.
Returns:
Wires: wires addressed by the observables in the tensor product
"""
return Wires.all_wires([o.wires for o in self.obs])
@property
def data(self):
"""Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
"""
return [p for sublist in [o.data for o in self.obs] for p in sublist]
@property
def num_params(self):
"""Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
"""
return len(self.data)
@property
def parameters(self):
"""Evaluated parameter values of all constituent observables in the tensor product.
Returns:
list[list[Any]]: nested list containing the parameters per observable
in the tensor product
"""
return [o.parameters for o in self.obs]
@property
def non_identity_obs(self):
"""Returns the non-identity observables contained in the tensor product.
Returns:
list[:class:`~.Observable`]: list containing the non-identity observables
in the tensor product
"""
return [obs for obs in self.obs if not isinstance(obs, qml.Identity)]
def __matmul__(self, other):
if isinstance(other, Tensor):
self.obs.extend(other.obs)
return self
if isinstance(other, Observable):
self.obs.append(other)
return self
raise ValueError("Can only perform tensor products between observables.")
def __rmatmul__(self, other):
if isinstance(other, Observable):
self.obs[:0] = [other]
return self
raise ValueError("Can only perform tensor products between observables.")
__imatmul__ = __matmul__
@property
def eigvals(self):
"""Return the eigenvalues of the specified tensor product observable.
This method uses pre-stored eigenvalues for standard observables where
possible.
Returns:
array[float]: array containing the eigenvalues of the tensor product
observable
"""
if self._eigvals_cache is not None:
return self._eigvals_cache
standard_observables = {"PauliX", "PauliY", "PauliZ", "Hadamard"}
# observable should be Z^{\otimes n}
self._eigvals_cache = pauli_eigs(len(self.wires))
# Sort observables lexicographically by the strings of the wire labels
# TODO: check for edge cases of the sorting, e.g. Tensor(Hermitian(obs, wires=[0, 2]),
# Hermitian(obs, wires=[1, 3, 4])
# Sorting the observables based on wires, so that the order of
# the eigenvalues is correct
obs_sorted = sorted(self.obs, key=lambda x: [str(l) for l in x.wires.labels])
# check if there are any non-standard observables (such as Identity)
if set(self.name) - standard_observables:
# Tensor product of observables contains a mixture
# of standard and non-standard observables
self._eigvals_cache = np.array([1])
for k, g in itertools.groupby(obs_sorted, lambda x: x.name in standard_observables):
if k:
# Subgroup g contains only standard observables.
self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))
else:
# Subgroup g contains only non-standard observables.
for ns_ob in g:
# loop through all non-standard observables
self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)
return self._eigvals_cache
def diagonalizing_gates(self):
"""Return the gate set that diagonalizes a circuit according to the
specified tensor observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
Returns:
list: list containing the gates diagonalizing the tensor observable
"""
diag_gates = []
for o in self.obs:
diag_gates.extend(o.diagonalizing_gates())
return diag_gates
@property
def matrix(self):
r"""Matrix representation of the tensor operator
in the computational basis.
**Example:**
Note that the returned matrix *only includes explicitly
declared observables* making up the tensor product;
that is, it only returns the matrix for the specified
subsystem it is defined for.
>>> O = qml.PauliZ(0) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, 1]])
To get the full :math:`2^3\times 2^3` Hermitian matrix
acting on the 3-qubit system, the identity on wire 1
must be explicitly included:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1., 0., 0., 0., 0., 0., 0., 0.],
[ 0., -1., 0., -0., 0., -0., 0., -0.],
[ 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., -0., 0., -1., 0., -0., 0., -0.],
[ 0., 0., 0., 0., -1., -0., -0., -0.],
[ 0., -0., 0., -0., -0., 1., -0., 0.],
[ 0., 0., 0., 0., -0., -0., -1., -0.],
[ 0., -0., 0., -0., -0., 0., -0., 1.]])
Returns:
array: matrix representation
"""
# group the observables based on what wires they act on
U_list = []
for _, g in itertools.groupby(self.obs, lambda x: x.wires.labels):
# extract the matrices of each diagonalizing gate
mats = [i.matrix for i in g]
if len(mats) > 1:
# multiply all unitaries together before appending
mats = [multi_dot(mats)]
# append diagonalizing unitary for specific wire to U_list
U_list.append(mats[0])
# Return the Hermitian matrix representing the observable
# over the defined wires.
return functools.reduce(np.kron, U_list)
def prune(self):
"""Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from
the observables building up the :class:`~.Tensor`.
The ``return_type`` attribute is preserved while pruning.
If the tensor product only contains one observable, then this observable instance is
returned.
Note that, as a result, this method can return observables that are not a :class:`~.Tensor`
instance.
**Example:**
Pruning that returns a :class:`~.Tensor`:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.prune()
        <pennylane.operation.Tensor at 0x7fc1642d1590>
>>> [(o.name, o.wires) for o in O.prune().obs]
[('PauliZ', [0]), ('PauliZ', [2])]
Pruning that returns a single observable:
>>> O = qml.PauliZ(0) @ qml.Identity(1)
>>> O_pruned = O.prune()
>>> (O_pruned.name, O_pruned.wires)
('PauliZ', [0])
Returns:
~.Observable: the pruned tensor product of observables
"""
if len(self.non_identity_obs) == 0:
# Return a single Identity as the tensor only contains Identities
obs = qml.Identity(self.wires[0])
elif len(self.non_identity_obs) == 1:
obs = self.non_identity_obs[0]
else:
obs = Tensor(*self.non_identity_obs)
obs.return_type = self.return_type
return obs
# =============================================================================
# CV Operations and observables
# =============================================================================
class CV:
"""A mixin base class denoting a continuous-variable operation."""
# pylint: disable=no-member
def heisenberg_expand(self, U, wires):
"""Expand the given local Heisenberg-picture array into a full-system one.
Args:
U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)
wires (Wires): wires on the device the array ``U`` should be expanded
to apply to
Raises:
ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect
Returns:
array[float]: expanded array, dimension ``1+2*num_wires``
"""
U_dim = len(U)
nw = len(self.wires)
if U.ndim > 2:
raise ValueError("Only order-1 and order-2 arrays supported.")
if U_dim != 1 + 2 * nw:
raise ValueError("{}: Heisenberg matrix is the wrong size {}.".format(self.name, U_dim))
if len(wires) == 0 or len(self.wires) == len(wires):
# no expansion necessary (U is a full-system matrix in the correct order)
return U
if not wires.contains_wires(self.wires):
raise ValueError(
"{}: Some observable wires {} do not exist on this device with wires {}".format(
self.name, self.wires, wires
)
)
# get the indices that the operation's wires have on the device
wire_indices = wires.indices(self.wires)
# expand U into the I, x_0, p_0, x_1, p_1, ... basis
dim = 1 + len(wires) * 2
def loc(w):
"Returns the slice denoting the location of (x_w, p_w) in the basis."
ind = 2 * w + 1
return slice(ind, ind + 2)
if U.ndim == 1:
W = np.zeros(dim)
W[0] = U[0]
for k, w in enumerate(wire_indices):
W[loc(w)] = U[loc(k)]
elif U.ndim == 2:
if isinstance(self, Observable):
W = np.zeros((dim, dim))
else:
W = np.eye(dim)
W[0, 0] = U[0, 0]
for k1, w1 in enumerate(wire_indices):
s1 = loc(k1)
d1 = loc(w1)
# first column
W[d1, 0] = U[s1, 0]
# first row (for gates, the first row is always (1, 0, 0, ...), but not for observables!)
W[0, d1] = U[0, s1]
for k2, w2 in enumerate(wire_indices):
W[d1, loc(w2)] = U[s1, loc(k2)] # block k1, k2 in U goes to w1, w2 in W.
return W
@staticmethod
def _heisenberg_rep(p):
r"""Heisenberg picture representation of the operation.
* For Gaussian CV gates, this method returns the matrix of the linear
transformation carried out by the gate for the given parameter values.
The method is not defined for non-Gaussian gates.
**The existence of this method is equivalent to setting** ``grad_method = 'A'``.
* For observables, returns a real vector (first-order observables) or
symmetric matrix (second-order observables) of expansion coefficients
of the observable.
For single-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x, \p)`.
For multi-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
.. note::
For gates, we assume that the inverse transformation is obtained
by negating the first parameter.
Args:
p (Sequence[float]): parameter values for the transformation
Returns:
array[float]: :math:`\tilde{U}` or :math:`q`
"""
# pylint: disable=unused-argument
return None
@classproperty
def supports_heisenberg(self):
"""Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`
static method, thereby indicating that it is Gaussian and does not block the use
of the parameter-shift differentiation method if found between the differentiated gate
and an observable.
"""
return CV._heisenberg_rep != self._heisenberg_rep
class CVOperation(CV, Operation):
"""Base class for continuous-variable quantum operations."""
# pylint: disable=abstract-method
@classproperty
def supports_parameter_shift(self):
"""Returns True iff the CV Operation supports the parameter-shift differentiation method.
This means that it has ``grad_method='A'`` and
has overridden the :meth:`~.CV._heisenberg_rep` static method.
"""
return self.grad_method == "A" and self.supports_heisenberg
def heisenberg_pd(self, idx):
"""Partial derivative of the Heisenberg picture transform matrix.
Computed using grad_recipe.
Args:
idx (int): index of the parameter with respect to which the
partial derivative is computed.
Returns:
array[float]: partial derivative
"""
# get the gradient recipe for this parameter
recipe = self.grad_recipe[idx]
# Default values
multiplier = 0.5
a = 1
shift = np.pi / 2
        # We set the default recipe as follows:
# ∂f(x) = c*f(x+s) - c*f(x-s)
default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]
param_shift = default_param_shift if recipe is None else recipe
pd = None # partial derivative of the transformation
p = self.parameters
original_p_idx = p[idx]
for c, _a, s in param_shift:
# evaluate the transform at the shifted parameter values
p[idx] = _a * original_p_idx + s
U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
if pd is None:
pd = c * U
else:
pd += c * U
return pd
def heisenberg_tr(self, wires, inverse=False):
r"""Heisenberg picture representation of the linear transformation carried
out by the gate at current parameter values.
Given a unitary quantum gate :math:`U`, we may consider its linear
transformation in the Heisenberg picture, :math:`U^\dagger(\cdot) U`.
If the gate is Gaussian, this linear transformation preserves the polynomial order
of any observables that are polynomials in :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
This also means it maps :math:`\text{span}(\mathbf{r})` into itself:
.. math:: U^\dagger \mathbf{r}_i U = \sum_j \tilde{U}_{ij} \mathbf{r}_j
For Gaussian CV gates, this method returns the transformation matrix for
the current parameter values of the Operation. The method is not defined
for non-Gaussian (and non-CV) gates.
Args:
wires (Wires): wires on the device that the observable gets applied to
inverse (bool): if True, return the inverse transformation instead
Raises:
RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method
Returns:
array[float]: :math:`\tilde{U}`, the Heisenberg picture representation of the linear transformation
"""
p = self.parameters
if inverse:
if self.par_domain == "A":
# TODO: expand this for the new par domain class, for non-unitary matrices.
p[0] = np.linalg.inv(p[0])
else:
p[0] = -p[0] # negate first parameter
U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
# not defined?
if U is None:
raise RuntimeError(
"{} is not a Gaussian operation, or is missing the _heisenberg_rep method.".format(
self.name
)
)
return self.heisenberg_expand(U, wires)
class CVObservable(CV, Observable):
r"""Base class for continuous-variable observables.
The class attribute :attr:`~.ev_order` can be defined to indicate
to PennyLane whether the corresponding CV observable is a polynomial in the
quadrature operators. If so,
* ``ev_order = 1`` indicates a first order polynomial in quadrature
operators :math:`(\x, \p)`.
* ``ev_order = 2`` indicates a second order polynomial in quadrature
operators :math:`(\x, \p)`.
If :attr:`~.ev_order` is not ``None``, then the Heisenberg representation
of the observable should be defined in the static method :meth:`~.CV._heisenberg_rep`,
returning an array of the correct dimension.
"""
# pylint: disable=abstract-method
ev_order = None #: None, int: if not None, the observable is a polynomial of the given order in `(x, p)`.
def heisenberg_obs(self, wires):
r"""Representation of the observable in the position/momentum operator basis.
Returns the expansion :math:`q` of the observable, :math:`Q`, in the
basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
* For first-order observables returns a real vector such
that :math:`Q = \sum_i q_i \mathbf{r}_i`.
* For second-order observables returns a real symmetric matrix
such that :math:`Q = \sum_{ij} q_{ij} \mathbf{r}_i \mathbf{r}_j`.
Args:
wires (Wires): wires on the device that the observable gets applied to
Returns:
array[float]: :math:`q`
"""
p = self.parameters
U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
return self.heisenberg_expand(U, wires)
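# A minimal sketch of the interface described above: a hypothetical first-order
# observable representing Q = x on a single mode, so that q = (0, 1, 0) in the
# basis (I, x, p). It is illustrative only and not an observable shipped with
# the library; the class attribute values are assumptions for the example.
class _ExampleQuadX(CVObservable):
    """Illustrative example only: the ``x`` quadrature of one mode."""
    num_params = 0
    num_wires = 1
    ev_order = 1
    @staticmethod
    def _heisenberg_rep(p):
        # Constant vector: this observable does not depend on any parameters.
        return np.array([0.0, 1.0, 0.0])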
def operation_derivative(operation) -> np.ndarray:
r"""Calculate the derivative of an operation.
For an operation :math:`e^{i \hat{H} \phi t}`, this function returns the matrix representation
in the standard basis of its derivative with respect to :math:`t`, i.e.,
.. math:: \frac{d \, e^{i \hat{H} \phi t}}{dt} = i \phi \hat{H} e^{i \hat{H} \phi t},
where :math:`\phi` is a real constant.
Args:
operation (.Operation): The operation to be differentiated.
Returns:
array: the derivative of the operation as a matrix in the standard basis
Raises:
ValueError: if the operation does not have a generator or is not composed of a single
trainable parameter
"""
generator, prefactor = operation.generator
if generator is None:
raise ValueError(f"Operation {operation.name} does not have a generator")
if operation.num_params != 1:
# Note, this case should already be caught by the previous raise since we haven't worked out
# how to have an operator for multiple parameters. It is added here in case of a future
# change
raise ValueError(
f"Operation {operation.name} is not written in terms of a single parameter"
)
if not isinstance(generator, np.ndarray):
generator = generator.matrix
if operation.inverse:
prefactor *= -1
generator = generator.conj().T
return 1j * prefactor * generator @ operation.matrix
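# Usage sketch (assuming a one-parameter gate with a generator, e.g. a Pauli
# rotation; ``SomeRotation`` is a placeholder name, not a library class):
#     op = SomeRotation(0.3, wires=0)
#     dU = operation_derivative(op)
# ``dU`` then equals 1j * prefactor * H @ op.matrix with (H, prefactor) taken
# from op.generator, matching the formula in the docstring.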
| 34.822235 | 122 | 0.603598 | [
"Apache-2.0"
] | DanielPolatajko/pennylane | pennylane/operation.py | 61,709 | Python |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import phylogeny
def main(argv):
lines = files.read_lines(argv[0])
taxa = lines[0].split()
table = lines[1:]
print '\n'.join('{%s, %s} {%s, %s}' % (a1, a2, b1, b2) for ((a1, a2), (b1, b2)) in phylogeny.quartets(taxa, table))
if __name__ == "__main__":
main(sys.argv[1:])
| 21.105263 | 119 | 0.610973 | [
"MIT"
] | cowboysmall-comp/rosalind | src/stronghold/rosalind_qrt.py | 401 | Python |
from __future__ import absolute_import, print_function
from django.conf import settings
CLIENT_ID = getattr(settings, "GITHUB_APP_ID", None)
CLIENT_SECRET = getattr(settings, "GITHUB_API_SECRET", None)
REQUIRE_VERIFIED_EMAIL = getattr(settings, "GITHUB_REQUIRE_VERIFIED_EMAIL", False)
ERR_NO_ORG_ACCESS = "You do not have access to the required GitHub organization."
ERR_NO_PRIMARY_EMAIL = (
"We were unable to find a primary email address associated with your GitHub account."
)
ERR_NO_SINGLE_PRIMARY_EMAIL = (
"We were unable to find a single primary email address associated with your GitHub account."
)
ERR_NO_VERIFIED_PRIMARY_EMAIL = (
"We were unable to find a verified, primary email address associated with your GitHub account."
)
ERR_NO_SINGLE_VERIFIED_PRIMARY_EMAIL = "We were unable to find a single verified, primary email address associated with your GitHub account."
# we request repo as we share scopes with the other GitHub integration
SCOPE = "user:email,read:org,repo"
# deprecated please use GITHUB_API_DOMAIN and GITHUB_BASE_DOMAIN
DOMAIN = getattr(settings, "GITHUB_DOMAIN", "api.github.com")
BASE_DOMAIN = getattr(settings, "GITHUB_BASE_DOMAIN", "github.com")
API_DOMAIN = getattr(settings, "GITHUB_API_DOMAIN", DOMAIN)
ACCESS_TOKEN_URL = "https://{0}/login/oauth/access_token".format(BASE_DOMAIN)
AUTHORIZE_URL = "https://{0}/login/oauth/authorize".format(BASE_DOMAIN)
| 37.289474 | 141 | 0.791814 | [
"BSD-3-Clause"
] | vaniot-s/sentry | src/sentry/auth/providers/github/constants.py | 1,417 | Python |
from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from .. MeshData import MeshData
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
from .. import shaders
import numpy as np
__all__ = ['GLMeshItem']
class GLMeshItem(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays a 3D triangle mesh.
"""
def __init__(self, faces, vertexes=None):
"""
See :class:`MeshData <pyqtgraph.opengl.MeshData>` for initialization arguments.
"""
if isinstance(faces, MeshData):
self.data = faces
else:
self.data = MeshData()
self.data.setFaces(faces, vertexes)
GLGraphicsItem.__init__(self)
def initializeGL(self):
self.shader = shaders.getShader('balloon')
l = glGenLists(1)
self.triList = l
glNewList(l, GL_COMPILE)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable( GL_BLEND )
glEnable( GL_ALPHA_TEST )
#glAlphaFunc( GL_ALWAYS,0.5 )
glEnable( GL_POINT_SMOOTH )
glDisable( GL_DEPTH_TEST )
glColor4f(1, 1, 1, .1)
glBegin( GL_TRIANGLES )
for face in self.data:
for (pos, norm, color) in face:
glColor4f(*color)
glNormal3f(norm.x(), norm.y(), norm.z())
glVertex3f(pos.x(), pos.y(), pos.z())
glEnd()
glEndList()
#l = glGenLists(1)
#self.meshList = l
#glNewList(l, GL_COMPILE)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#glEnable( GL_BLEND )
#glEnable( GL_ALPHA_TEST )
##glAlphaFunc( GL_ALWAYS,0.5 )
#glEnable( GL_POINT_SMOOTH )
#glEnable( GL_DEPTH_TEST )
#glColor4f(1, 1, 1, .3)
#glBegin( GL_LINES )
#for f in self.faces:
#for i in [0,1,2]:
#j = (i+1) % 3
#glVertex3f(*f[i])
#glVertex3f(*f[j])
#glEnd()
#glEndList()
def paint(self):
shaders.glUseProgram(self.shader)
glCallList(self.triList)
shaders.glUseProgram(0)
#glCallList(self.meshList)
| 28.78481 | 87 | 0.559367 | [
"MIT"
] | robertsj/poropy | pyqtgraph/opengl/items/GLMeshItem.py | 2,274 | Python |
import os, sys
try:
    import MacOS
except ImportError:
    MacOS = None
from pygame.pkgdata import getResource
from pygame import sdlmain_osx
__all__ = ['Video_AutoInit']
def Video_AutoInit():
"""This is a function that's called from the c extension code
just before the display module is initialized"""
if MacOS and not MacOS.WMAvailable():
if not sdlmain_osx.WMEnable():
raise ImportError("Can not access the window manager. Use py2app or execute with the pythonw script.")
if not sdlmain_osx.RunningFromBundleWithNSApplication():
try:
default_icon_data = getResource('pygame_icon.tiff').read()
except IOError:
default_icon_data = None
except NotImplementedError:
default_icon_data = None
sdlmain_osx.InstallNSApplication(default_icon_data)
if (os.getcwd() == '/') and len(sys.argv) > 1:
os.chdir(os.path.dirname(sys.argv[0]))
return True
| 30.03125 | 115 | 0.676379 | [
"MIT"
] | AdamaTraore75020/PYBomber | venv/Lib/site-packages/pygame/macosx.py | 961 | Python |
import sys
from pathlib import Path, PurePath
sys.path.append("./models/research/object_detection/")
sys.path.append("./models/research/")
import os
import cv2
import numpy as np
import tensorflow as tf
from utils import label_map_util
from utils import visualization_utils as vis_util
from image_to_video_converter import images_to_video
from PIL import Image
class detector:
def __init__(self, model_directory):
model_path = os.path.join(model_directory, 'frozen_inference_graph.pb')
labelmap_path = os.path.join(model_directory, 'labelmap.pbtxt')
self.num_classes = 5
self.label_map = label_map_util.load_labelmap(labelmap_path)
self.categories = label_map_util.convert_label_map_to_categories(self.label_map,
max_num_classes=self.num_classes,
use_display_name=True)
self.category_index = label_map_util.create_category_index(self.categories)
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
self.od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
self.serialized_graph = fid.read()
self.od_graph_def.ParseFromString(self.serialized_graph)
tf.import_graph_def(self.od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
def draw_boxes_for_image(self, frame, min_score_threshold):
frame_expanded = np.expand_dims(frame, axis=0)
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: frame_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=2,
min_score_thresh=min_score_threshold)
"""
print("Self cateogry index")
print(self.category_index)
print("Score/Classes")
for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores),np.squeeze(classes).astype(np.int32)):
print(score, cls, self.category_index[cls])
"""
good_boxes = [box
for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes).astype(np.int32))
if score >= min_score_threshold and 'traffic' not in self.category_index[cls]['name']]
return frame, good_boxes
@staticmethod
def denormalize(box, width, height):
# Order taken from: https://www.tensorflow.org/api_docs/python/tf/image/draw_bounding_boxes
y_min, x_min, y_max, x_max = box[0], box[1], box[2], box[3]
x_min *= width
x_max *= width
y_min *= height
y_max *= height
return [x_min, x_max, y_min, y_max]
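    # Worked example of the conversion above (illustrative numbers): a
    # normalized box [y_min, x_min, y_max, x_max] = [0.1, 0.2, 0.5, 0.8] on a
    # 640x480 frame becomes [x_min, x_max, y_min, y_max] = [128, 512, 48, 240].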
@staticmethod
def log_boxes(frame_number, boxes, ofile, width, height):
for box in boxes:
box = detector.denormalize(box, width, height)
# Cast float coordinates to integers
box = map(int, box)
box = [frame_number] + list(box)
line = "|".join(map(str, box))
print(line, file=ofile)
def process_image(self, video_name, frame_number, image_path,
min_score_threshold, output_path, save_images):
image = cv2.imread(image_path)
image_name = Path(image_path).stem
result_frame = None
# Set up logging file
log_name = os.path.join(output_path, f"{video_name}_log.txt")
with open(log_name, 'a') as log_file:
print("At Frame:", frame_number)
frame = np.array(image)
# Draw boxes
frame, boxes = self.draw_boxes_for_image(frame, min_score_threshold)
height, width, layers = frame.shape
# Log boxes
detector.log_boxes(frame_number, boxes, log_file, width, height)
# Save frame with boxes for output
result_frame = frame
# Save frame with boxes
if save_images:
frame_path = os.path.join(output_path, f"{video_name}_frame_{image_name}.png")
print("Saving image at", frame_path)
vis_util.save_image_array_as_png(frame, frame_path)
return result_frame
def process_image_folder(self, folder_path, min_score_threshold, output_path, save_images):
folder_name = Path(folder_path).stem
frames = []
file_names = os.listdir(folder_path)
file_names.sort()
for f in file_names:
image_path = os.path.join(folder_path, f)
if os.path.isfile(image_path):
frame_number = len(frames)
next_frame = self.process_image(folder_name, frame_number, image_path,
min_score_threshold, output_path, save_images)
frames.append(next_frame)
if save_images:
            # Write the assembled frames out as a video, using the same .avi
            # convention as process_video below.
            video_path = os.path.join(output_path, f"{folder_name}.avi")
            print("Saving video at", video_path)
images_to_video(frames, video_path, 30)
def process_video(self, video_path, min_score_threshold, output_path, save_images):
video_name = Path(video_path).stem
# Open video file
video = cv2.VideoCapture(video_path)
# Set up logging file
log_name = os.path.join(output_path, f"{video_name}_log.txt")
with open(log_name, 'a') as log_file:
frames = []
while(video.isOpened()):
ret, frame = video.read()
if not ret:
break
frame_number = len(frames)
print("At Frame:", frame_number)
# Draw boxes
frame, boxes = self.draw_boxes_for_image(frame, min_score_threshold)
height, width, layers = frame.shape
# Log boxes
detector.log_boxes(frame_number, boxes, log_file, width, height)
# Save frame with boxes
if save_images:
frame_path = os.path.join(output_path, f"{video_name}_frame_{frame_number}.png")
print("Saving image at", frame_path)
vis_util.save_image_array_as_png(frame, frame_path)
frames.append(frame)
# Save as video
if save_images:
out_video_path = os.path.join(output_path, f"{video_name}.avi")
print("Saving video at", out_video_path)
images_to_video(frames, out_video_path, 30)
# Clean up
video.release()
cv2.destroyAllWindows()
def default_detector():
det = detector("./trained_model/detectors/")
return det
def default_inference():
det = default_detector()
det.process_video("./data/SignaledJunctionRightTurn_1.avi", 0.70, "./output/temp/", False)
return det
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', help='Path to the frozen inference graph and labelmap files',
required=True)
parser.add_argument('--video_path', help='Path to the video', required=True)
parser.add_argument('--min_threshold', type=float, help='Minimum score threshold for a bounding box to be drawn', default=0.7)
parser.add_argument('--output_path', help='Path for storing output images and/or logs', required=True)
parser.add_argument('--save_images', action='store_true')
args = parser.parse_args()
det = detector(args.model_path)
det.process_video(args.video_path, args.min_threshold, args.output_path, args.save_images)
| 47.212766 | 131 | 0.609283 | [
"Unlicense"
] | s-nandi/carla-car-detection | detection.py | 8,876 | Python |
#!/usr/bin/env python
#
# mri_convert_ppc64 ds ChRIS plugin app
#
# (c) 2016-2019 Fetal-Neonatal Neuroimaging & Developmental Science Center
# Boston Children's Hospital
#
# http://childrenshospital.org/FNNDSC/
# [email protected]
#
import os
import sys
sys.path.append(os.path.dirname(__file__))
# import the Chris app superclass
from chrisapp.base import ChrisApp
Gstr_title = """
_ _ ____ ___
(_) | | / ___| / |
_ __ ___ _ __ _ ___ ___ _ ____ _____ _ __| |_ _ __ _ __ ___/ /___ / /| |
| '_ ` _ \| '__| | / __/ _ \| '_ \ \ / / _ \ '__| __| | '_ \| '_ \ / __| ___ \/ /_| |
| | | | | | | | || (_| (_) | | | \ V / __/ | | |_ | |_) | |_) | (__| \_/ |\___ |
|_| |_| |_|_| |_| \___\___/|_| |_|\_/ \___|_| \__| | .__/| .__/ \___\_____/ |_/
______ ______| | | |
|______| |______|_| |_|
"""
Gstr_synopsis = """
NAME
mri_convert_ppc64.py
SYNOPSIS
python mri_convert_ppc64.py \\
[-h] [--help] \\
[--json] \\
[--man] \\
[--meta] \\
[--savejson <DIR>] \\
[-v <level>] [--verbosity <level>] \\
[--version] \\
[--inputFile <inputFile>] \\
[--outputFile <outputFile>] \\
[--executable <executable>] \\
[--execArgs <execArgs>] \\
<inputDir> \\
<outputDir>
BRIEF EXAMPLE
* Bare bones execution
mkdir in out && chmod 777 out
python mri_convert_ppc64.py \\
in out
DESCRIPTION
`mri_convert_ppc64.py` calls an underlying executable
(typically 'mri_convert') and passes it an input and output spec.
ARGS
[--inputFile <inputFile>]
The input file, relative to <inputDir>.
[--outputFile <outputFile>]
        The output file, relative to <outputDir>.
[--executable <executable>]
The actual executable to run.
[--execArgs <execArgs>]
Additional executable-specific command line args.
[-h] [--help]
If specified, show help message and exit.
[--json]
If specified, show json representation of app and exit.
[--man]
If specified, print (this) man page and exit.
[--meta]
If specified, print plugin meta data and exit.
[--savejson <DIR>]
If specified, save json representation file to DIR and exit.
[-v <level>] [--verbosity <level>]
Verbosity level for app. Not used currently.
[--version]
If specified, print version number and exit.
"""
class Mri_convert_ppc64(ChrisApp):
"""
This calls a pre-built PPC64 'mri_convert' that is housed in a base container..
"""
AUTHORS = 'BU-2019-Power9 ([email protected])'
SELFPATH = os.path.dirname(os.path.abspath(__file__))
SELFEXEC = os.path.basename(__file__)
EXECSHELL = 'python3'
TITLE = 'A PowerPPC plugin to run the FreeSurfer mri_convert'
CATEGORY = ''
TYPE = 'ds'
DESCRIPTION = 'This calls a pre-built PPC64 mri_convert that is housed in a base container.'
DOCUMENTATION = 'http://wiki'
VERSION = '0.1'
ICON = '' # url of an icon image
LICENSE = 'Opensource (MIT)'
MAX_NUMBER_OF_WORKERS = 1 # Override with integer value
MIN_NUMBER_OF_WORKERS = 1 # Override with integer value
MAX_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MIN_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MAX_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_GPU_LIMIT = 0 # Override with the minimum number of GPUs, as an integer, for your plugin
MAX_GPU_LIMIT = 0 # Override with the maximum number of GPUs, as an integer, for your plugin
# Use this dictionary structure to provide key-value output descriptive information
# that may be useful for the next downstream plugin. For example:
#
# {
# "finalOutputFile": "final/file.out",
# "viewer": "genericTextViewer",
# }
#
# The above dictionary is saved when plugin is called with a ``--saveoutputmeta``
# flag. Note also that all file paths are relative to the system specified
# output directory.
OUTPUT_META_DICT = {}
def define_parameters(self):
"""
Define the CLI arguments accepted by this plugin app.
Use self.add_argument to specify a new app argument.
"""
self.add_argument('--executable',
dest = 'executable',
type = str,
optional = True,
help = 'the conversion program to use',
default = '/usr/bin/mri_convert')
self.add_argument('--inputFile',
dest = 'inputFile',
type = str,
optional = True,
help = 'the input file',
default = '')
self.add_argument('--outputFile',
dest = 'outputFile',
type = str,
optional = True,
help = 'the output file',
default = '')
self.add_argument('--execArgs',
dest = 'execArgs',
type = str,
optional = True,
help = 'additonal arguments for the chosen executable',
default = '')
def run(self, options):
"""
Define the code to be run by this plugin app.
"""
if not len(options.inputFile):
print("ERROR: No input file has been specified!")
print("You must specify an input file relative to the input directory.")
sys.exit(1)
if not len(options.outputFile):
print("ERROR: No output file has been specified!")
print("You must specicy an output file relative to the output directory.")
sys.exit(1)
str_cmd = '%s %s %s/%s %s/%s' % ( options.executable,
options.execArgs,
options.inputdir,
options.inputFile,
options.outputdir,
options.outputFile)
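        # For illustration only (file names below are hypothetical), the
        # resulting command looks like:
        #   /usr/bin/mri_convert <execArgs> /incoming/in.mgz /outgoing/out.nii.gz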
os.system(str_cmd)
def show_man_page(self):
"""
Print the app's man page.
"""
print(Gstr_title)
print(Gstr_synopsis)
# ENTRYPOINT
if __name__ == "__main__":
chris_app = Mri_convert_ppc64()
chris_app.launch()
| 39.677273 | 108 | 0.414251 | [
"MIT"
] | quinnyyy/pl-mri_convert_ppc64 | mri_convert_ppc64/mri_convert_ppc64.py | 8,729 | Python |
from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
return frozenset(res)
| 35.9375 | 89 | 0.628406 | [
"MIT"
] | EnricoMagnago/F3 | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/12-extending_bound_39.py | 8,625 | Python |
import json
import os.path
tags = {
'1.0': '1__0__3',
'1.1': '1__1__4',
}
schema_url = 'https://standard.open-contracting.org/schema/{}/release-schema.json'
def path(filename):
return os.path.join('tests', 'fixtures', filename)
def read(filename, mode='rt', encoding=None, **kwargs):
with open(path(filename), mode, encoding=encoding, **kwargs) as f:
return f.read()
def load(*args, **kwargs):
return json.loads(read(*args, **kwargs))
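# Usage sketch of the helpers above (the fixture name is hypothetical):
#     data = load('release-package.json')   # parses tests/fixtures/release-package.json
#     url = schema_url.format(tags['1.1'])  # builds the v1.1 (tag 1__1__4) release-schema.json URL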
| 20.478261 | 82 | 0.647558 | [
"BSD-3-Clause"
] | open-contracting/ocds-merge | tests/__init__.py | 471 | Python |
import numpy as np
from gym import utils
from gym_env_mujoco150 import mujoco_env
import mujoco_py
class Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "walker2d_150.xml", 4)
utils.EzPickle.__init__(self)
def _step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = ((posafter - posbefore) / self.dt)
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (height > 0.8 and height < 2.0 and
ang > -1.0 and ang < 1.0)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.type = mujoco_py.const.CAMERA_TRACKING
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] = .8
self.viewer.cam.elevation = -20
| 34.837209 | 94 | 0.615487 | [
"MIT"
] | pfnet/gym-env-mujoco150 | gym_env_mujoco150/walker2d.py | 1,498 | Python |
# MIT License
# Copyright (c) 2018 the NJUNMT-pytorch authors.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary
from src.decoding import beam_search, ensemble_beam_search
from src.decoding.beam_search import nmt_lm_fusion_beam_search
from src.metric.bleu_scorer import SacreBLEUScorer
from src.models import build_model
from src.modules.criterions import NMTCriterion
from src.optim import Optimizer
from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler, RsqrtScheduler
from src.utils.common_utils import *
from src.utils.configs import default_configs, pretty_configs
from src.utils.logging import *
from src.utils.moving_average import MovingAverage
BOS = Vocabulary.BOS
EOS = Vocabulary.EOS
PAD = Vocabulary.PAD
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location="cpu"):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
if split_size <= 1:
yield inputs
else:
        lengths = [len(s) for s in inputs[-1]]  # lengths of the last input's sequences
sorted_indices = np.argsort(lengths)
# sorting inputs
inputs = [
[inp[ii] for ii in sorted_indices]
for inp in inputs
]
# split shards
        total_batch = sorted_indices.shape[0]  # total number of samples in the batch
if split_size >= total_batch:
yield inputs
else:
shard_size = total_batch // split_size
_indices = list(range(total_batch))[::shard_size] + [total_batch]
for beg, end in zip(_indices[:-1], _indices[1:]):
yield (inp[beg:end] for inp in inputs)
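# Worked sketch of the sharding above: with split_size=2 and four source
# sentences, the batch is first sorted by length and then yielded as two
# shards of two sentences each, mirroring the call in train():
#     for seqs_x_t, in split_shard(seqs_x, split_size=2):
#         ...  # each shard holds length-sorted sentences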
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"""
Args:
eval ('bool'): indicator for eval/infer.
Returns:
"""
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if cuda is True:
x = x.cuda()
return x
seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD,
cuda=cuda, batch_first=batch_first)
if seqs_y is None:
return x
seqs_y = list(map(lambda s: [BOS] + s + [EOS], seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD,
cuda=cuda, batch_first=batch_first)
return x, y
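# Worked sketch of the padding above: two token-id sequences [[5, 6], [7]]
# become, after adding BOS/EOS and right-padding with PAD,
#     [[BOS, 5, 6, EOS],
#      [BOS, 7, EOS, PAD]]
# i.e. a LongTensor of shape (batch_size, longest_length + 2) when
# batch_first is True.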
def compute_forward(model,
critic,
seqs_x,
eval=False,
normalization=1.0,
norm_by_words=False
):
"""
:type model: nn.Module
:type critic: NMTCriterion
"""
x_inp = seqs_x[:, :-1].contiguous()
x_label = seqs_x[:, 1:].contiguous()
words_norm = x_label.ne(PAD).float().sum(1)
if not eval:
model.train()
critic.train()
# For training
with torch.enable_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, reduce=False,
normalization=normalization)
if norm_by_words:
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
# For compute loss
with torch.no_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True)
return loss.item()
def loss_validation(model, critic, valid_iterator):
"""
:type model: Transformer
:type critic: NMTCriterion
:type valid_iterator: DataIterator
"""
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
_, seqs_x = batch
n_sents += len(seqs_x)
n_tokens += sum(len(s) for s in seqs_x)
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model,
critic=critic,
seqs_x=x,
eval=True)
if np.isnan(loss):
WARN("NaN detected!")
sum_loss += float(loss)
return float(sum_loss / n_sents)
def bleu_validation(uidx,
valid_iterator,
model,
bleu_scorer,
vocab_tgt,
batch_size,
valid_dir="./valid",
max_steps=10,
beam_size=5,
alpha=-1.0
):
model.eval()
numbers = []
trans = []
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator(batch_size=batch_size)
for batch in valid_iter:
seq_nums = batch[0]
numbers += seq_nums
seqs_x = batch[1]
infer_progress_bar.update(len(seqs_x))
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = beam_search(nmt_model=model,
beam_size=beam_size,
max_steps=max_steps,
src_seqs=x, alpha=alpha)
word_ids = word_ids.cpu().numpy().tolist()
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
x_tokens = []
for wid in sent_t[0]:
if wid == EOS:
break
x_tokens.append(vocab_tgt.id2token(wid))
if len(x_tokens) > 0:
trans.append(vocab_tgt.tokenizer.detokenize(x_tokens))
else:
trans.append('%s' % vocab_tgt.id2token(EOS))
origin_order = np.argsort(numbers).tolist()
trans = [trans[ii] for ii in origin_order]
infer_progress_bar.close()
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx))
with open(hyp_path, 'w') as f:
for line in trans:
f.write('%s\n' % line)
with open(hyp_path) as f:
bleu_v = bleu_scorer.corpus_bleu(f)
return bleu_v
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
"""
Args:
nmt_model: model.
pretrain_path ('str'): path to pretrained model.
map_dict ('dict'): mapping specific parameter names to those names
in current model.
exclude_prefix ('dict'): excluding parameters with specific names
for pretraining.
Raises:
ValueError: Size not match, parameter name not match or others.
"""
if exclude_prefix is None:
exclude_prefix = []
if pretrain_path != "":
INFO("Loading pretrained model from {}".format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for name, params in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO("Loading param: {}...".format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN("{}: {}".format(str(Exception), e))
INFO("Pretrained model loaded.")
def train(FLAGS):
"""
FLAGS:
saveto: str
reload: store_true
config_path: str
pretrain_path: str, default=""
model_name: str
log_path: str
"""
# write log of training to file.
write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S")))
GlobalNames.USE_GPU = FLAGS.use_gpu
    if GlobalNames.USE_GPU:
        CURRENT_DEVICE = "cuda:0"
    else:
        CURRENT_DEVICE = "cpu"
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
INFO(pretty_configs(configs))
# Add default configs
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"])
train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"])
train_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['train_data'][0],
vocabulary=vocab_src,
max_len=data_configs['max_len'][0],
),
shuffle=training_configs['shuffle']
)
valid_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['valid_data'][0],
vocabulary=vocab_src,
),
)
training_iterator = DataIterator(dataset=train_bitext_dataset,
batch_size=train_batch_size,
use_bucket=training_configs['use_bucket'],
buffer_size=train_buffer_size,
batching_func=training_configs['batching_key'])
valid_iterator = DataIterator(dataset=valid_bitext_dataset,
batch_size=training_configs['valid_batch_size'],
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
# ================================ Begin ======================================== #
# Build Model & Optimizer
# We would do steps below on after another
# 1. build models & criterion
# 2. move models & criterion to gpu if needed
# 3. load pre-trained model if needed
# 4. build optimizer
# 5. build learning rate scheduler if needed
# 6. load checkpoints if needed
# 0. Initial
model_collections = Collections()
checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs['num_kept_checkpoints']
)
best_model_saver = BestKSaver(save_prefix="{0}.best".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs["num_kept_best_checkpoints"])
# 1. Build Model & Criterion
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for n, p in nmt_model.named_parameters()])
    params_without_embedding = sum([p.numel() for n, p in nmt_model.named_parameters() if n.find('embedding') == -1])
    INFO('Total parameters: {}'.format(params_total))
    INFO('Total parameters (excluding word embeddings): {}'.format(params_without_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# 2. Move to GPU
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
# 3. Load pretrained model if needed
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
# 4. Build optimizer
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'],
model=nmt_model,
lr=lrate,
grad_clip=optimizer_configs['grad_clip'],
optim_args=optimizer_configs['optimizer_params']
)
# 5. Build scheduler for optimizer if needed
if optimizer_configs['schedule_method'] is not None:
if optimizer_configs['schedule_method'] == "loss":
scheduler = ReduceOnPlateauScheduler(optimizer=optim,
**optimizer_configs["scheduler_configs"]
)
elif optimizer_configs['schedule_method'] == "noam":
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif optimizer_configs["schedule_method"] == "rsqrt":
scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
# 6. build moving average
if training_configs['moving_average_method'] is not None:
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'],
named_params=nmt_model.named_parameters(),
alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# Reload from latest checkpoint
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler,
collections=model_collections, ma=ma)
# ================================================================================== #
# Prepare training
eidx = model_collections.get_collection("eidx", [0])[-1]
uidx = model_collections.get_collection("uidx", [0])[-1]
bad_count = model_collections.get_collection("bad_count", [0])[-1]
oom_count = model_collections.get_collection("oom_count", [0])[-1]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf') # Max Float
saving_files = []
# Timer for computing speed
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar("Epoch", (eidx + 1), uidx)
# Build iterator and progress bar
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx),
total=len(training_iterator),
unit="sents"
)
for batch in training_iter:
uidx += 1
if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss":
scheduler.step(global_step=uidx)
seqs_x = batch
n_samples_t = len(seqs_x)
n_words_t = sum(len(s) for s in seqs_x)
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.
optim.zero_grad()
try:
# Prepare data
for seqs_x_t, in split_shard(seqs_x, split_size=training_configs['update_cycle']):
x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model,
critic=critic,
seqs_x=x,
eval=False,
normalization=n_samples_t,
norm_by_words=training_configs["norm_by_words"])
train_loss += loss / x.size(1)
optim.step()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ma is not None and eidx >= training_configs['moving_average_start_epoch']:
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str(
'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx)
# ================================================================================== #
# Display some information
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
# words per second and sents per second
words_per_sec = cum_words / (timer.toc(return_seconds=True))
sents_per_sec = cum_samples / (timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx)
summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx)
# Reset timer
timer.tic()
cum_words = 0
cum_samples = 0
# ================================================================================== #
# Loss Validation & Learning rate annealing
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'],
debug=FLAGS.debug):
if ma is not None:
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_loss = loss_validation(model=nmt_model,
critic=critic,
valid_iterator=valid_iterator,
)
model_collections.add_to_collection("history_losses", valid_loss)
min_history_loss = np.array(model_collections.get_collection("history_losses")).min()
summary_writer.add_scalar("loss", valid_loss, global_step=uidx)
summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if ma is not None:
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if optimizer_configs["schedule_method"] == "loss":
scheduler.step(global_step=uidx, metric=best_valid_loss)
# If model get new best valid bleu score
if valid_loss < best_valid_loss:
bad_count = 0
if is_early_stop is False:
# 1. save the best model's parameters
torch.save(nmt_model.state_dict(), best_model_prefix + ".final")
# 2. save the best checkpoint
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
best_model_saver.save(global_step=uidx, metric=valid_loss,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
else:
bad_count += 1
# At least one epoch should be traversed
if bad_count >= training_configs['early_stop_patience'] and eidx > 0:
is_early_stop = True
WARN("Early Stop!")
summary_writer.add_scalar("bad_count", bad_count, uidx)
INFO("{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}".format(
uidx, valid_loss, lrate, bad_count
))
# ================================================================================== #
# Saving checkpoints
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
if not is_early_stop:
checkpoint_saver.save(global_step=uidx,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
training_progress_bar.close()
eidx += 1
if eidx > training_configs["max_epochs"]:
break
def nmt_lm_fusion_translate(FLAGS):
GlobalNames.USE_GPU = FLAGS.use_gpu
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
data_configs = configs['data_configs']
nmt_model_configs = configs['nmt_model_configs']
lm_model_configs = configs['lm_model_configs']
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
valid_dataset = TextLineDataset(data_path=FLAGS.source_path,
vocabulary=vocab_src)
valid_iterator = DataIterator(dataset=valid_dataset,
batch_size=FLAGS.batch_size,
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# ================================================================================== #
# Build Model & Sampler & Validation
INFO('Building model...')
timer.tic()
nmt_model_path = FLAGS.nmt_model_path
lm_model_path = FLAGS.lm_model_path
nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
n_tgt_vocab=vocab_tgt.max_n_words, **nmt_model_configs)
lm_model = build_model(n_words=vocab_tgt.max_n_words, **lm_model_configs)
nmt_model.eval()
lm_model.eval()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Reloading model parameters...')
timer.tic()
nmt_params = load_model_parameters(nmt_model_path, map_location="cpu")
lm_params = load_model_parameters(lm_model_path, map_location="cpu")
nmt_model.load_state_dict(nmt_params)
lm_model.load_state_dict(lm_params)
if GlobalNames.USE_GPU:
nmt_model.cuda()
lm_model.cuda()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Begin...')
result_numbers = []
result = []
n_words = 0
timer.tic()
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
numbers, seqs_x = batch
batch_size_t = len(seqs_x)
x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = nmt_lm_fusion_beam_search(nmt_model=nmt_model, lm_model=lm_model,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps,
src_seqs=x,
alpha=FLAGS.alpha,
beta=FLAGS.beta)
word_ids = word_ids.cpu().numpy().tolist()
result_numbers += numbers
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
result.append(sent_t)
n_words += len(sent_t[0])
infer_progress_bar.update(batch_size_t)
infer_progress_bar.close()
INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True))))
translation = []
for sent in result:
samples = []
for trans in sent:
sample = []
for w in trans:
if w == vocab_tgt.EOS:
break
sample.append(vocab_tgt.id2token(w))
samples.append(vocab_tgt.tokenizer.detokenize(sample))
translation.append(samples)
# resume the ordering
origin_order = np.argsort(result_numbers).tolist()
translation = [translation[ii] for ii in origin_order]
with open(FLAGS.saveto, 'w') as f:
for trans in translation:
f.write("%s\n"%trans[0])
if __name__ == '__main__':
_args = {
"model_name": "test_rnnlm",
"reload": False,
"config_path": "./configs/test_rnnlm.yaml",
"debug": True,
"use_gpu": False,
"task": "lm",
"log_path": "/tmp",
"saveto": "/tmp",
"valid_path": "/tmp",
}
from src.bin import train as _train
    _train.run(**_args)
| 34.516827 | 120 | 0.566683 | [
"MIT"
] | skysky77/MGNMT | src/tasks/lm.py | 28,718 | Python |
import click
from . import __version__
from .cli_commands import create_cluster, upload, upload_and_update
from .configure import configure
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('Version {}'.format(__version__))
ctx.exit()
@click.group()
@click.option('--version', '-v', is_flag=True, callback=print_version,
help=__version__)
def cli(version):
pass
cli.add_command(configure)
cli.add_command(create_cluster)
cli.add_command(upload)
cli.add_command(upload_and_update)
| 21.923077 | 70 | 0.738596 | [
"BSD-3-Clause"
] | ShopRunner/apparate | stork/cli.py | 570 | Python |
"""Class definition of the ZoneSpeaker."""
import bisect
import functools
from typing import Any, Callable, List, Tuple
import simulation_groundtruth.srv as groundtruth_srv
from simulation_evaluation.msg import Speaker as SpeakerMsg
from simulation_groundtruth.msg import LabeledPolygon as LabeledPolygonMsg
from simulation_groundtruth.msg import Lane as LaneMsg
from simulation_groundtruth.msg import Section as SectionMsg
import simulation.utils.road.sections.type as road_section_type
from simulation.utils.geometry import Point
from simulation.utils.road.sections import SurfaceMarking
from .speaker import Speaker
class ZoneSpeaker(Speaker):
"""Information about the zone of the road the car is in."""
def __init__(
self,
*,
section_proxy: Callable[[], List[SectionMsg]],
lane_proxy: Callable[[int], LaneMsg],
obstacle_proxy: Callable[[int], List[LabeledPolygonMsg]],
surface_marking_proxy: Callable[[int], List[LabeledPolygonMsg]],
parking_proxy: Callable[[int], Any],
intersection_proxy: Callable[[int], Any],
overtaking_buffer: float = 2,
start_zone_buffer: float = 1,
end_zone_buffer: float = 1.5,
yield_distance: Tuple[float, float] = (-0.6, -0.2),
):
"""Initialize zone speaker.
Args:
section_proxy: Returns all sections when called.
lane_proxy: Returns a LaneMsg for each section.
obstacle_proxy: function which returns obstacles in a section.
surface_marking_proxy: function which returns surface_markings in a section.
parking_proxy: function which returns parking msg in a section.
intersection_proxy: function which returns intersection msg in a section.
parking_spot_buffer: buffer around parking spots in which a parking attempt
is also accepted
overtaking_buffer: buffer around obstacles that the car is allowed to overtake
start_zone_buffer: beginning of the road that is considered as a start zone
end_zone_buffer: end of the road that is considered as the end
yield_distance: interval before intersections that the vehicle must yield in
"""
super().__init__(
section_proxy=section_proxy,
lane_proxy=lane_proxy,
obstacle_proxy=obstacle_proxy,
surface_marking_proxy=surface_marking_proxy,
intersection_proxy=intersection_proxy,
)
self.get_parking_msgs = parking_proxy
self.overtaking_buffer = overtaking_buffer
self.start_zone_buffer = start_zone_buffer
self.end_zone_buffer = end_zone_buffer
self.yield_distance = yield_distance
# Get total length.
self.total_length = self.middle_line.length
@functools.cached_property
def overtaking_zones(self) -> List[Tuple[float, float]]:
"""Intervals in which the car is allowed to overtake along the
:py:attr:`Speaker.middle_line`."""
# Get all obstacle polygons
obstacles = list(
lp.frame
for sec in self.sections
if sec.type != road_section_type.PARKING_AREA
for lp in self.get_obstacles_in_section(sec.id)
)
# Get blocked area polygons because the car
# is allowed to drive onto the left lane there!
surface_markings = list(
surface_marking
for sec in self.sections
for surface_marking in self.get_surface_markings_in_section(sec.id)
)
blocked_areas = [
sm.frame for sm in surface_markings if sm.id_ == SurfaceMarking.BLOCKED_AREA[0]
]
# Intervals where polygons are along the middle line
intervals = list(
self.get_interval_for_polygon(obs) for obs in (obstacles + blocked_areas)
)
if len(intervals) == 0:
return []
zone_intervals = [
(
intervals[0][0] - self.overtaking_buffer,
intervals[0][1] + self.overtaking_buffer,
)
]
for start, end in intervals[1:]:
last = zone_intervals[-1]
# If the start of this section and end of the last overtaking zone
# overlap the last interval is extended
if start - self.overtaking_buffer < last[1]:
zone_intervals[-1] = (last[0], end + self.overtaking_buffer)
# Else a new interval is added
else:
zone_intervals.append(
(start - self.overtaking_buffer, end + self.overtaking_buffer)
)
# import rospy
# rospy.loginfo(f"Obstacle zones: {zone_intervals}")
return zone_intervals
def _intersection_yield_zones(self, rule: int) -> List[Tuple[float, float]]:
"""Intervals in which the car is supposed to halt/stop (in front of intersections).
Args:
rule: only intersections with this rule are considered
"""
intervals = []
for sec in self.sections:
if sec.type != road_section_type.INTERSECTION:
continue
# Get arclength of the last point of the middle line
# at the intersection south opening
intersection_msg = self.get_intersection(sec.id)
arc_length = self.middle_line.project(
Point(intersection_msg.south.middle_line[-1])
)
if intersection_msg.rule == rule:
intervals.append(
(
arc_length + self.yield_distance[0],
arc_length + self.yield_distance[1],
)
)
return intervals
@functools.cached_property
def stop_zones(self) -> List[Tuple[float, float]]:
"""Intervals in which the car is supposed to stop (in front of intersections)."""
return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.STOP)
@functools.cached_property
def halt_zones(self) -> List[Tuple[float, float]]:
"""Intervals in which the car is supposed to halt (in front of intersections)."""
return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.YIELD)
@functools.cached_property
def speed_zones(self) -> List[Tuple[float, int]]:
surface_markings = [
self.get_surface_markings_in_section(sec.id) for sec in self.sections
]
surface_markings = [marking for sublist in surface_markings for marking in sublist]
result = []
result.append((0, SpeakerMsg.SPEED_UNLIMITED_ZONE))
for marking in surface_markings:
if (
SurfaceMarking.ZONE_10_START[0]
<= marking.id_
<= SurfaceMarking.ZONE_90_START[0]
):
limit = 10 * (marking.id_ - SurfaceMarking.ZONE_10_START[0] + 1)
result.append(
(
self.get_interval_for_polygon(marking.frame)[0],
getattr(SpeakerMsg, f"SPEED_{limit}_ZONE"),
)
)
if (
SurfaceMarking.ZONE_10_END[0]
<= marking.id_
<= SurfaceMarking.ZONE_90_END[0]
):
result.append(
(
self.get_interval_for_polygon(marking.frame)[0],
SpeakerMsg.SPEED_UNLIMITED_ZONE,
)
)
return sorted(result, key=lambda tup: tup[0])
def _inside_any_interval(self, intervals: List[Tuple[float, float]]) -> bool:
"""Determine if the car is currently in any of the given intervals."""
beginnings = list(interval[0] for interval in intervals)
endings = list(interval[1] for interval in intervals)
b_idx = bisect.bisect_left(beginnings, self.arc_length) - 1
e_idx = bisect.bisect_left(endings, self.arc_length) - 1
        # If the vehicle is inside an interval, that interval's beginning lies
        # before the car's arc length and its ending lies after it
return b_idx - e_idx == 1
def speak(self) -> List[SpeakerMsg]:
"""List of speaker msgs.
Contents:
* beginning of road -> :ref:`Speaker <speaker_msg>`.START_ZONE,
end of road -> :ref:`Speaker <speaker_msg>`.END_ZONE,
and in between -> :ref:`Speaker <speaker_msg>`.DRIVING_ZONE,
* close to an obstacle -> :ref:`Speaker <speaker_msg>`.OVERTAKING_ZONE
* before yield/stop lines \
-> :ref:`Speaker <speaker_msg>`.HALT_ZONE/SpeakerMsg.STOP_ZONE,
* parking area -> :ref:`Speaker <speaker_msg>`.PARKING_ZONE
"""
msgs = super().speak()
def append_msg(t: int):
msg = SpeakerMsg()
msg.type = t
msgs.append(msg)
# Determine if car is in parking zone
append_msg(
SpeakerMsg.PARKING_ZONE
if self.current_section.type == road_section_type.PARKING_AREA
else SpeakerMsg.NO_PARKING_ZONE
)
# Overtaking
append_msg(
SpeakerMsg.OVERTAKING_ZONE
if self._inside_any_interval(self.overtaking_zones)
else SpeakerMsg.NO_OVERTAKING_ZONE
)
# Start/End zone
if self.arc_length < self.start_zone_buffer:
append_msg(SpeakerMsg.START_ZONE)
elif self.arc_length + self.end_zone_buffer < self.total_length:
append_msg(SpeakerMsg.DRIVING_ZONE)
else:
append_msg(SpeakerMsg.END_ZONE)
# Stop / halt zone
if self._inside_any_interval(self.halt_zones):
append_msg(SpeakerMsg.HALT_ZONE)
elif self._inside_any_interval(self.stop_zones):
append_msg(SpeakerMsg.STOP_ZONE)
else:
append_msg(SpeakerMsg.NO_STOP_ZONE)
# Speed zone
for x, msg in reversed(self.speed_zones):
if x + 0.5 < self.arc_length: # 50cm Threshold
append_msg(msg)
break
return msgs
| 38.588679 | 92 | 0.612263 | [
"MIT"
] | KITcar-Team/kitcar-gazebo-simulation | simulation/src/simulation_evaluation/src/speaker/speakers/zone.py | 10,226 | Python |
import argparse
import xml.etree.cElementTree as etree
import os
from os import listdir
from os.path import isfile, join
import random
def processMedlineFolder(medlineFolder,outFolder):
"""Basic function that iterates through abstracts in a medline file, do a basic word count and save to a file
Args:
medlineFolder (folder): Medline XML folder containing abstracts
outFolder (folder): Folder to save output data to
Returns:
Nothing
"""
abstractCount = 0
# List of all files in the directory
files = [ f for f in listdir(medlineFolder) if isfile(join(medlineFolder, f)) ]
# Filter for only XML files
files = sorted([ f for f in files if f.endswith('xml') ])
outfile = join(outFolder,"countWordsError.txt")
with open(outfile, "a") as result:
# Iterate over all files
for f in files:
print("Processing %s" % f)
fullpath = join(medlineFolder,f)
# Iterate through the XML file and stop on each MedlineCitation
for event, elem in etree.iterparse(fullpath, events=('start', 'end', 'start-ns', 'end-ns')):
if (event=='end' and elem.tag=='MedlineCitation'):
# Let's get the PMID and Abstract elements from the XML
pmidElements = elem.findall('./PMID')
abstractElements = elem.findall('./Article/Abstract/AbstractText')
if len(pmidElements) != 1 or len(abstractElements) != 1:
continue
# Pull the values of the PMID and abstract elements
pmid = pmidElements[0].text
abstract = abstractElements[0].text
if not abstract is None:
# Do a very basic word count
wordCount = len(abstract.split())
# Prepare and save output to file
line = "%s\t%d\n" % (pmid,wordCount)
result.write(line)
abstractCount += 1
print("%d abstracts processed" % abstractCount)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Little toy example to "process" a Medline abstract file and gives naive word counts for each abstract')
parser.add_argument('-i',required=True,help='Medline folder to process')
parser.add_argument('-o',required=True,help='Output folder for word-counts')
args = parser.parse_args()
# Sometimes throw an error
if random.random() > 0.5:
raise RuntimeError("This sometimes throws an error")
processMedlineFolder(args.i,args.o)
| 31.273973 | 150 | 0.711345 | [
"MIT"
] | NCBI-Hackathons/Autoupdating_PubMed_Corpus_for_NLP | server/tools/CountWordsError/0.1/CountWordsError.py | 2,283 | Python |
from app01.apis.geng import bp as geng_bp
from app01.apis.end1.view_end1 import bp as end1_bp
routers = [
geng_bp,
end1_bp,
]
| 16.875 | 51 | 0.725926 | [
"MIT"
] | pscly/myend | app01/route.py | 135 | Python |
import json
import yaml
"""
SMock -- Serverboards Mock library -- Mock comfortably.
This library helps to mock function and method calls, getting the data
from an external yaml file.
"""
class MockWrapper:
"""
Wraps all the data returned by the mocked function to behave like a
dictionary, like an object, like a function, like a jsonable dict...
like almost everything you may need
"""
def __init__(self, data):
self.__data = data
def __getattr__(self, key):
if key not in self.__data:
raise KeyError("'%s' not found in %s" % (key, self.__data.keys()))
return self.__getitem__(key)
def __call__(self):
return wrapped(self.__data)
def __getitem__(self, key):
val = self.__data[key]
if isinstance(val, (int, str)):
return val
return wrapped(val)
def __str__(self):
return str(self.__data)
def __repr__(self):
return repr(self.__data)
def __eq__(self, other):
return self.__data.__eq__(other)
def __le__(self, other):
return self.__data.__le__(other)
def __ge__(self, other):
return self.__data.__ge__(other)
def __lt__(self, other):
return self.__data.__lt__(other)
def __gt__(self, other):
return self.__data.__gt__(other)
def __len__(self):
return self.__data.__len__()
def keys(self):
return self.__data.keys()
def get(self, key, defv=None):
return self.__data.get(key, defv)
class MockWrapperList(MockWrapper, list):
def __init__(self, data):
MockWrapper.__init__(self, data)
list.__init__(self, data)
class MockWrapperDict(MockWrapper, dict):
def __init__(self, data):
MockWrapper.__init__(self, data)
dict.__init__(self, data)
def wrapped(data):
if isinstance(data, dict):
return MockWrapperDict(data)
if isinstance(data, list):
return MockWrapperList(data)
return MockWrapper(data)
def mock_match(A, B):
"""
    Check whether the params of a mocked function call are as expected.
    It is necessary as we sometimes get a tuple while the mock data stores
    lists.
Examples:
```
>>> mock_match("A", "A")
True
>>> mock_match("A", "B")
False
>>> mock_match(["A", "B", "C"], ["A", "B", "C"])
True
>>> mock_match(["A", "B", "C"], "*")
True
```
"""
if B == '*': # always match
return True
if isinstance(A, (tuple, list)):
return all(mock_match(a, b) for (a, b) in zip(A, B))
return A == B
def mock_res(name, data, args=[], kwargs={}):
"""
Given a name, data and call parameters, returns the mocked response
If there is no matching response, raises an exception that can be used to
prepare the mock data.
    This can be used in situations where you mock functions that return data;
    for example at [Serverboards](https://serverboards.io), we use it to
    mock RPC calls.
Its also used internally on every other mocking.
"""
data = data.get(name)
if not data:
raise Exception(
"unknown method for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
name, json.dumps(args), json.dumps(kwargs)
)
)
for res in data:
if (mock_match(args, res.get("args")) and
mock_match(kwargs, res.get("kwargs", {}))):
if 'error' in res:
raise Exception(res["error"])
response = res["response"]
if isinstance(response, (int, str)):
return response
return wrapped(response)
raise Exception(
"unknown data for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
name, json.dumps(args), json.dumps(kwargs)
)
)
def mock_method(name, data):
"""
Returns a function that mocks an original function.
"""
def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
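# Minimal usage sketch for mock_method (the function name and mock data below
# are illustrative, not taken from a real mock file):
#
#   data = {"some.function": [{"args": "*", "response": 42}]}
#   patched = mock_method("some.function", data)
#   assert patched("anything") == 42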
def mock_method_async(name, data):
"""
Returns an async function that mocks an original async function
"""
async def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
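# The async variant works the same way, but the returned mock must be awaited
# (illustrative, using the same `data` dict as above; requires `import asyncio`):
#
#   patched = mock_method_async("some.function", data)
#   assert asyncio.run(patched("anything")) == 42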
class SMock:
"""
Encapsulates mocking calls so it's easier to load data and mock methods
Example:
```python
>>> import requests
>>> smocked = SMock("tests/data.yaml")
>>> requests.get = smocked.mock_method("requests.get")
>>> res = requests.get("https://mocked.url")
>>> res.status_code
200
>>> res.content
'Gocha!'
>>> res.json()
{'text': 'Gocha too!'}
```
The mock file is a yaml file with each mocked function as keys, and
`args`/`kwargs` as calling args and kwargs, and `response` the response.
Check `tests/data.yaml` for an example at the source code.
"""
def __init__(self, mockfile):
with open(mockfile) as fd:
            self._data = yaml.safe_load(fd)
def mock_res(self, name, args=[], kwargs={}):
"""
Calls `mock_res`
Mock by args:
```
>>> smock = SMock("tests/data.yaml")
>>> res = smock.mock_res("requests.get", ["https://mocked.url"])
>>> res.status_code
200
```
Using "*" as args, as fallback. As there is no kwargs, use default:
```
>>> res = smock.mock_res("requests.get", ["https://error.mocked.url"])
>>> res.status_code
404
```
Using "*" as kwargs:
```
>>> res = smock.mock_res("requests.get",
... ["https://mocked.url"],
... {'data': 'data'})
>>> res.status_code
200
>>> res.content
'Mocked query'
```
"""
return mock_res(name, self._data, args, kwargs)
def mock_method(self, name):
"""
Calls `mock_method`
"""
return mock_method(name, self._data)
    async def mock_method_async(self, name):
        """
        Calls `mock_method_async` and returns the async mock function.
        """
        return mock_method_async(name, self._data)
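# Async usage sketch for SMock (illustrative, mirrors the sync doctest above;
# requires `import asyncio`):
#
#   async def demo():
#       smock = SMock("tests/data.yaml")
#       mocked_get = await smock.mock_method_async("requests.get")
#       res = await mocked_get("https://mocked.url")
#       assert res.status_code == 200
#
#   asyncio.run(demo())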
| 25.497942 | 102 | 0.576017 | [
"Apache-2.0"
] | serverboards/serverboards-plugin-google-drive | smock.py | 6,196 | Python |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 13:27:33 2020
@author: Jin Dou
"""
import torch
def buildDataLoader(*tensors,TorchDataSetType,oSamplerType=None,**Args):
if(Args.get('DatasetArgs') != None):
DataSetArgs = Args['DatasetArgs']
dataset = TorchDataSetType(*tensors,**DataSetArgs)
else:
dataset = TorchDataSetType(*tensors)
if(Args.get('DataLoaderArgs') != None):
DataLoaderArgs = Args['DataLoaderArgs']
if(oSamplerType == None or Args.get('SamplerArgs') == None):
dataLoader = torch.utils.data.DataLoader(dataset,**DataLoaderArgs)
else:
SamplerArgs = Args.get('SamplerArgs')
oSampler = oSamplerType(dataset,**SamplerArgs)
dataLoader = torch.utils.data.DataLoader(dataset,sampler=oSampler,**DataLoaderArgs)
else:
dataLoader = torch.utils.data.DataLoader(dataset)
return dataLoader
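# Usage sketch (illustrative; the tensors and argument values are assumptions):
#
#   x = torch.randn(100, 3)
#   y = torch.randint(0, 2, (100,))
#   loader = buildDataLoader(
#       x, y,
#       TorchDataSetType=torch.utils.data.TensorDataset,
#       DataLoaderArgs={'batch_size': 16, 'shuffle': True},
#   )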
class CPytorch:
def __init__(self):
self.Lib = self._ImportTorch()
def _ImportTorch(self):
import torch as root
return root
def _getNNAttr(self,name:str):
import torch.nn as NN
ans = getattr(NN,name)
return ans
class CTorchNNYaml(CPytorch):
def __init__(self):
super().__init__()
def _readYaml(self,filePath):
import yaml
ans = None
with open(filePath,'r') as stream:
try:
ans = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return ans
def _ParseType(self,conf:dict):
if(conf['Type'] == 'Sequential'):
return self.buildSequential(conf)
def _subListToTuple(self,oInput):
if type(oInput) == dict:
for key in oInput:
if(type(oInput[key]) == list):
oInput[key] = tuple(oInput[key])
elif type(oInput) == list:
for idx,attr in enumerate(oInput):
if type(attr) == list:
oInput[idx] = tuple(attr)
else:
raise ValueError("_subListToTuple: input should be dict or list")
def buildSequential(self,conf:dict):
oSeq = self.Lib.nn.Sequential()
ModelConfList = conf['Model']
for idx,ModelConf in enumerate(ModelConfList):
CModule = self._getNNAttr(ModelConf[0])
attr = ModelConf[1]
oModule = None
name = str(idx)
if(len(ModelConf) > 2 and type(ModelConf[2]) == dict):
'''if contain aux attribute'''
auxAttr = ModelConf[2]
if (auxAttr.get('name')!=None):
''' if aux attribute contain name attribute'''
name = auxAttr['name']
if(type(attr) == list):
if len(attr) == 0:
oModule = CModule()
elif(type(attr[0]) == list and type(attr[1]) == dict):
self._subListToTuple(attr[0])
self._subListToTuple(attr[1])
oModule = CModule(*attr[0],**attr[1])
elif(any(type(x) not in [int,float,str,bool,list] for x in attr)):
raise ValueError('attribute of Module %s (index %d) is invalid' % (ModelConf[0],idx))
else:
self._subListToTuple(attr)
oModule = CModule(*attr)
elif(type(attr) == dict):
self._subListToTuple(attr)
oModule = CModule(**attr)
else:
raise ValueError('attribute of Module %s (index %d) is invalid' % (ModelConf[0],idx))
oSeq.add_module(name,oModule)
return oSeq
def __call__(self,confFile:str):
yamlDict = self._readYaml(confFile)
return self._ParseType(yamlDict)
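# Example YAML layout accepted by CTorchNNYaml (a sketch inferred from
# buildSequential; the file name and layer choices are illustrative):
#
#   Type: Sequential
#   Model:
#     - [Linear, [10, 20]]
#     - [ReLU, []]
#     - [Linear, {in_features: 20, out_features: 1}, {name: head}]
#
#   oSeq = CTorchNNYaml()('model.yaml')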
| 35.052632 | 105 | 0.53003 | [
"MIT"
] | powerfulbean/StellarBrainwav | StimRespFlow/DataProcessing/DeepLearning/Factory.py | 3,996 | Python |
"""
scaffoldgraph tests.core.test_fragment
"""
import pytest
from rdkit import Chem
from scaffoldgraph.core.fragment import *
@pytest.fixture(name='mol')
def test_molecule():
smiles = 'CCN1CCc2c(C1)sc(NC(=O)Nc3ccc(Cl)cc3)c2C#N'
return Chem.MolFromSmiles(smiles)
def canon(smiles):
"""Canonicalize SMILES for safety. If canonicalization ever changes this should remain consistent"""
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
def test_murcko(mol):
murcko = get_murcko_scaffold(mol, generic=False)
assert Chem.MolToSmiles(murcko) == canon('O=C(Nc1ccccc1)Nc1cc2c(s1)CNCC2')
murcko = get_murcko_scaffold(mol, generic=True)
assert Chem.MolToSmiles(murcko) == canon('CC(CC1CCCCC1)CC1CC2CCCCC2C1')
def test_annotation(mol):
annotation = Chem.MolToSmiles(get_annotated_murcko_scaffold(mol))
annotation = annotation.replace('1*', '*')
annotation = annotation.replace('2*', '*')
annotation = annotation.replace('3*', '*')
assert annotation.count('*') == 3
def test_murcko_all(mol):
frags = get_all_murcko_fragments(mol, break_fused_rings=True)
assert len(frags) == 6
frags = get_all_murcko_fragments(mol, break_fused_rings=False)
assert len(frags) == 3
def test_murcko_next(mol):
scf = get_murcko_scaffold(mol)
frags_1 = get_next_murcko_fragments(scf, break_fused_rings=True)
frags_1 = {Chem.MolToSmiles(x) for x in frags_1}
assert len(frags_1) == 2
frags_2 = get_next_murcko_fragments(scf, break_fused_rings=False)
frags_2 = {Chem.MolToSmiles(x) for x in frags_2}
assert len(frags_2) == 2
assert len(frags_1.intersection(frags_2)) == 1
def test_collect_linker_atoms():
mol = Chem.MolFromSmiles('CCCCCCCCCc1ccccc1')
remove_atoms = set()
a = collect_linker_atoms(mol.GetAtomWithIdx(0), remove_atoms, True)
assert len(a) == 1
assert len(remove_atoms) == 9
remove_atoms.clear()
a = collect_linker_atoms(mol.GetAtomWithIdx(0), remove_atoms, False)
assert len(a) == 1
assert len(remove_atoms) == 8
| 31.446154 | 104 | 0.719667 | [
"MIT"
] | trumanw/ScaffoldGraph | tests/core/test_fragment.py | 2,044 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from oslo_utils.secretutils import md5
from cinder import exception
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.kioxia import entities
from cinder.volume.drivers.kioxia import kumoscale as kioxia
from cinder.volume.drivers.kioxia import rest_client
VOL_BACKEND_NAME = 'kioxia_kumoscale_1'
VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2'
VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba'
VOL_SIZE = 10
VOL_PROTOCOL = 'NVMeoF'
SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77'
CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da'
CONN_HOST_NAME = 'devstack'
CONN_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \
'beaae2de-3a97-4be1-a739-6ac4bc5bf138'
success_prov_response = entities.ProvisionerResponse(None, None, "Success",
"Success")
fail_prov_response = entities.ProvisionerResponse(None, None, "Failure",
"Failure")
prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1')
prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2')
prov_location1 = entities.Location(VOL_UUID, prov_backend1)
prov_location2 = entities.Location(VOL_UUID, prov_backend2)
prov_volume = entities.VolumeProv(VOL_UUID, None, None, None,
None, None, None, None, None, None,
None, True, None, [prov_location1,
prov_location2])
prov_volumes_response = entities.ProvisionerResponse([prov_volume])
no_entities_prov_response = entities.ProvisionerResponse([], None, "Success")
class KioxiaVolumeTestCase(test.TestCase):
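    """Unit tests for the KIOXIA KumoScale Cinder volume driver."""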
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_info')
@mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale')
def setUp(self, mock_kumoscale, mock_get_info):
mock_get_info.return_value = success_prov_response
mock_kumoscale.return_value = \
rest_client.KioxiaProvisioner(['1.2.3.4'], 'cert', 'token')
super(KioxiaVolumeTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_backend_name = VOL_BACKEND_NAME
self.cfg.url = 'dummyURL'
self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE'
self.cfg.cafile = 'dummy'
self.cfg.num_replicas = 1
self.cfg.block_size = 512
self.cfg.max_iops_per_gb = 1000
self.cfg.desired_iops_per_gb = 1000
self.cfg.max_bw_per_gb = 1000
self.cfg.desired_bw_per_gb = 1000
self.cfg.same_rack_allowed = False
self.cfg.max_replica_down_time = 5
self.cfg.span_allowed = True
self.cfg.vol_reserved_space_percentage = 20
self.cfg.provisioning_type = 'THIN'
self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg)
self.driver.configuration.get = lambda *args, **kwargs: {}
self.driver.num_replicas = 2
self.expected_stats = {
'volume_backend_name': VOL_BACKEND_NAME,
'vendor_name': 'KIOXIA',
'driver_version': self.driver.VERSION,
'storage_protocol': 'NVMeOF',
'consistencygroup_support': False,
'thin_provisioning_support': True,
'multiattach': False,
'total_capacity_gb': 1000,
'free_capacity_gb': 600
}
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_info')
def test_get_kumoscale(self, mock_get_info):
mock_get_info.return_value = success_prov_response
result = self.driver._get_kumoscale('https://1.2.3.4:8090', 'token',
'cert')
self.assertEqual(result.mgmt_ips, ['1.2.3.4'])
self.assertEqual(result.port, '8090')
self.assertEqual(result.token, 'token')
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
def test_volume_create_success(self, mock_create_volume):
testvol = _stub_volume()
mock_create_volume.return_value = success_prov_response
result = self.driver.create_volume(testvol)
args, kwargs = mock_create_volume.call_args
mock_call = args[0]
self.assertEqual(mock_call.alias, testvol['name'][:27])
self.assertEqual(mock_call.capacity, testvol['size'])
self.assertEqual(mock_call.uuid, testvol['id'])
self.assertEqual(mock_call.protocol, VOL_PROTOCOL)
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
def test_volume_create_failure(self, mock_create_volume):
testvol = _stub_volume()
mock_create_volume.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, testvol)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
def test_volume_create_exception(self, mock_create_volume):
testvol = _stub_volume()
mock_create_volume.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, testvol)
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
def test_delete_volume_success(self, mock_delete_volume):
testvol = _stub_volume()
mock_delete_volume.return_value = success_prov_response
result = self.driver.delete_volume(testvol)
mock_delete_volume.assert_any_call(testvol['id'])
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
def test_delete_volume_failure(self, mock_delete_volume):
testvol = _stub_volume()
mock_delete_volume.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, testvol)
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
def test_delete_volume_exception(self, mock_delete_volume):
testvol = _stub_volume()
mock_delete_volume.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, testvol)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target1 = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target1])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
result = self.driver.initialize_connection(testvol, testconn)
mock_host_probe.assert_any_call(testconn['nqn'],
testconn['uuid'],
testconn['host'],
'Agent', 'cinder-driver-0.1', 30)
mock_publish.assert_any_call(testconn['uuid'], testvol['id'])
mock_get_volumes_by_uuid.assert_any_call(testvol['id'])
mock_get_targets.assert_any_call(testconn['uuid'], testvol['id'])
mock_get_backend_by_id.assert_any_call('dummy-pid-1')
expected_replica = {'portals': [('1.2.3.4', '4420', 'TCP')],
'target_nqn': 'target.nqn',
'vol_uuid': testvol['id']}
expected_data = {
'vol_uuid': testvol['id'],
'alias': testvol['name'],
'writable': True,
'volume_replicas': [expected_replica]
}
expected_result = {
'driver_volume_type': 'nvmeof',
'data': expected_data
}
self.assertDictEqual(result, expected_result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_host_probe_failure(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = fail_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_host_probe_exception(
self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid,
mock_get_targets, mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.side_effect = Exception()
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_publish_failure(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = fail_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_publish_exception(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.side_effect = Exception()
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_volumes_failure(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = fail_prov_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_no_volumes(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = no_entities_prov_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_volumes_exception(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.side_effect = Exception()
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_targets_failure(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = fail_prov_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_no_targets(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = no_entities_prov_response
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_targets_exception(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
backend = BackendEntity([prov_portal])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.side_effect = Exception()
mock_get_backend_by_id.return_value = \
entities.ProvisionerResponse([backend])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_backend_failure(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_no_backend(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.return_value = no_entities_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
@mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
def test_initialize_connection_backend_exception(self, mock_host_probe,
mock_publish,
mock_get_volumes_by_uuid,
mock_get_targets,
mock_get_backend_by_id):
testvol = _stub_volume()
testconn = _stub_connector()
prov_target = TargetEntity('target.nqn', prov_backend1)
prov_targets_response = entities.ProvisionerResponse([prov_target])
mock_publish.return_value = success_prov_response
mock_host_probe.return_value = success_prov_response
mock_get_volumes_by_uuid.return_value = prov_volumes_response
mock_get_targets.return_value = prov_targets_response
mock_get_backend_by_id.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
def test_terminate_connection(self, mock_unpublish):
testvol = _stub_volume()
testconn = _stub_connector()
mock_unpublish.return_value = success_prov_response
result = self.driver.terminate_connection(testvol, testconn)
mock_unpublish.assert_any_call(testconn['uuid'], testvol['id'])
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
def test_terminate_connection_unpublish_failure(self, mock_unpublish):
testvol = _stub_volume()
testconn = _stub_connector()
mock_unpublish.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
def test_terminate_connection_unpublish_exception(self, mock_unpublish):
testvol = _stub_volume()
testconn = _stub_connector()
mock_unpublish.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection, testvol, testconn)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats(self, mock_get_tenants):
tenant = TenantEntity(1000, 400)
mock_get_tenants.return_value = entities.ProvisionerResponse([tenant])
result = self.driver.get_volume_stats(True)
mock_get_tenants.assert_any_call()
self.assertDictEqual(result, self.expected_stats)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats_tenants_failure(self, mock_get_tenants):
mock_get_tenants.return_value = fail_prov_response
self.expected_stats['total_capacity_gb'] = 'unknown'
self.expected_stats['free_capacity_gb'] = 'unknown'
self.assertDictEqual(
self.driver.get_volume_stats(True), self.expected_stats)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats_no_tenants(self, mock_get_tenants):
mock_get_tenants.return_value = no_entities_prov_response
self.expected_stats['total_capacity_gb'] = 'unknown'
self.expected_stats['free_capacity_gb'] = 'unknown'
self.assertDictEqual(
self.driver.get_volume_stats(True), self.expected_stats)
@mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
def test_get_volume_stats_tenants_exception(self, mock_get_tenants):
mock_get_tenants.side_effect = Exception()
self.expected_stats['total_capacity_gb'] = 'unknown'
self.expected_stats['free_capacity_gb'] = 'unknown'
self.assertDictEqual(
self.driver.get_volume_stats(True), self.expected_stats)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
def test_create_snapshot_success(self, mock_create_snapshot):
testsnap = _stub_snapshot()
mock_create_snapshot.return_value = success_prov_response
result = self.driver.create_snapshot(testsnap)
args, kwargs = mock_create_snapshot.call_args
mock_call = args[0]
self.assertEqual(mock_call.alias, testsnap['name'])
self.assertEqual(mock_call.volumeID, testsnap['volume_id'])
self.assertEqual(mock_call.snapshotID, testsnap['id'])
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
def test_create_snapshot_failure(self, mock_create_snapshot):
testsnap = _stub_snapshot()
mock_create_snapshot.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, testsnap)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
def test_create_snapshot_exception(self, mock_create_snapshot):
testsnap = _stub_snapshot()
mock_create_snapshot.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, testsnap)
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
def test_delete_snapshot_success(self, mock_delete_snapshot):
testsnap = _stub_snapshot()
mock_delete_snapshot.return_value = success_prov_response
result = self.driver.delete_snapshot(testsnap)
mock_delete_snapshot.assert_any_call(testsnap['id'])
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
def test_delete_snapshot_failure(self, mock_delete_snapshot):
testsnap = _stub_snapshot()
mock_delete_snapshot.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot, testsnap)
@mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
def test_delete_snapshot_exception(self, mock_delete_snapshot):
testsnap = _stub_snapshot()
mock_delete_snapshot.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot, testsnap)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
def test_create_volume_from_snapshot_success(self,
mock_create_snapshot_volume):
testsnap = _stub_snapshot()
testvol = _stub_volume()
mock_create_snapshot_volume.return_value = success_prov_response
result = self.driver.create_volume_from_snapshot(testvol, testsnap)
args, kwargs = mock_create_snapshot_volume.call_args
mock_call = args[0]
self.assertEqual(mock_call.alias, testvol['name'])
self.assertEqual(mock_call.volumeID, testsnap['volume_id'])
self.assertEqual(mock_call.snapshotID, testsnap['id'])
self.assertEqual(mock_call.protocol, VOL_PROTOCOL)
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
def test_create_volume_from_snapshot_failure(self,
mock_create_snapshot_volume):
testsnap = _stub_snapshot()
testvol = _stub_volume()
mock_create_snapshot_volume.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot, testvol,
testsnap)
@mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
def test_create_volume_from_snapshot_exception(
self, mock_create_snapshot_volume):
testsnap = _stub_snapshot()
testvol = _stub_volume()
mock_create_snapshot_volume.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot, testvol,
testsnap)
@mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
def test_extend_volume_success(self, mock_expand_volume):
testvol = _stub_volume()
mock_expand_volume.return_value = success_prov_response
new_size = VOL_SIZE + 2
result = self.driver.extend_volume(testvol, new_size)
mock_expand_volume.assert_any_call(new_size, testvol['id'])
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
def test_extend_volume_failure(self, mock_expand_volume):
testvol = _stub_volume()
mock_expand_volume.return_value = fail_prov_response
new_size = VOL_SIZE + 2
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, testvol, new_size)
@mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
def test_extend_volume_exception(self, mock_expand_volume):
testvol = _stub_volume()
mock_expand_volume.side_effect = Exception()
new_size = VOL_SIZE + 2
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, testvol, new_size)
@mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
def test_create_cloned_volume_success(self, mock_clone_volume):
testvol = _stub_volume()
mock_clone_volume.return_value = success_prov_response
result = self.driver.create_cloned_volume(testvol, testvol)
args, kwargs = mock_clone_volume.call_args
mock_call = args[0]
self.assertEqual(mock_call.alias, testvol['name'])
self.assertEqual(mock_call.capacity, testvol['size'])
self.assertEqual(mock_call.volumeId, testvol['id'])
self.assertEqual(mock_call.sourceVolumeId, testvol['id'])
self.assertIsNone(result)
@mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
def test_create_cloned_volume_failure(self, mock_clone_volume):
testvol = _stub_volume()
mock_clone_volume.return_value = fail_prov_response
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume, testvol, testvol)
@mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
def test_create_cloned_volume_exception(self, mock_clone_volume):
testvol = _stub_volume()
mock_clone_volume.side_effect = Exception()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume, testvol, testvol)
def test_convert_host_name(self):
name = 'ks-node3-000c2960a794-000c2960a797'
result = self.driver._convert_host_name(name)
expected = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest()
self.assertEqual(result, expected)
def test_create_export(self):
result = self.driver.create_export(None, None, None)
self.assertIsNone(result)
def test_ensure_export(self):
result = self.driver.ensure_export(None, None)
self.assertIsNone(result)
def test_remove_export(self):
result = self.driver.remove_export(None, None)
self.assertIsNone(result)
def test_check_for_setup_error(self):
result = self.driver.check_for_setup_error()
self.assertIsNone(result)
def _stub_volume(*args, **kwargs):
volume = {'id': kwargs.get('id', VOL_UUID),
'name': kwargs.get('name', VOL_NAME),
'project_id': "test-project",
'display_name': kwargs.get('display_name', VOL_NAME),
'size': kwargs.get('size', VOL_SIZE),
'provider_location': kwargs.get('provider_location', None),
'volume_type_id': kwargs.get('volume_type_id', None)}
return volume
def _stub_connector(*args, **kwargs):
connector = {'uuid': kwargs.get('uuid', CONN_UUID),
'nqn': kwargs.get('nqn', CONN_NQN),
'host': kwargs.get('host', CONN_HOST_NAME)}
return connector
def _stub_snapshot(*args, **kwargs):
volume = {'id': kwargs.get('id', SNAP_UUID),
'name': kwargs.get('name', 'snap2000'),
'volume_id': kwargs.get('id', VOL_UUID)}
return volume
class TenantEntity:
def __init__(self, capacity, consumed):
self.tenantId = '0'
self.capacity = capacity
self.consumedCapacity = consumed
class TargetEntity:
def __init__(self, name, backend):
self.targetName = name
self.backend = backend
class BackendEntity:
def __init__(self, portals):
self.portals = portals
class PortalEntity:
def __init__(self, ip, port, transport):
self.ip = ip
self.port = port
self.transport = transport
if __name__ == '__main__':
unittest.main()
| 52.20156 | 79 | 0.670254 | [
"Apache-2.0"
] | Boye-Z/cinder | cinder/tests/unit/volume/drivers/test_kioxia.py | 40,143 | Python |
# Generated by Django 3.1 on 2020-09-28 07:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0065_bugtracker'),
]
operations = [
migrations.RenameField(
model_name='bugtracker',
old_name='bug',
new_name='subject',
),
migrations.RenameField(
model_name='bugtracker',
old_name='user_device_info',
new_name='user_device_information',
),
migrations.AddField(
model_name='bugtracker',
name='project',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.project'),
),
migrations.AlterField(
model_name='bugtracker',
name='bug_severity',
field=models.CharField(choices=[('Low', 'Low'), ('Minor', 'Minor'), ('Major', 'Major'), ('Critical', 'Critical'), ('Not a bug', 'Not a bug')], default=None, max_length=10),
),
]
| 30.742857 | 184 | 0.578996 | [
"MIT"
] | gade-raghav/project-enhancements | base/migrations/0066_auto_20200928_0706.py | 1,076 | Python |
"""Top-level package for pomdp-belief-tracking."""
__author__ = """sammie katt"""
__email__ = "[email protected]"
__version__ = "0.1.0"
from pomdp_belief_tracking import pf
| 22.375 | 50 | 0.731844 | [
"MIT"
] | kevslinger/pomdp-belief-tracking | pomdp_belief_tracking/__init__.py | 179 | Python |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains the behaviour of a generic seller AEA."""
from typing import cast
from aea.skills.behaviours import TickerBehaviour
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
LedgerApiDialogues,
OefSearchDialogues,
)
from packages.fetchai.skills.generic_seller.strategy import GenericStrategy
DEFAULT_SERVICES_INTERVAL = 60.0
LEDGER_API_ADDRESS = "fetchai/ledger:0.3.0"
class GenericServiceRegistrationBehaviour(TickerBehaviour):
"""This class implements a behaviour."""
def __init__(self, **kwargs):
"""Initialise the behaviour."""
services_interval = kwargs.pop(
"services_interval", DEFAULT_SERVICES_INTERVAL
) # type: int
super().__init__(tick_interval=services_interval, **kwargs)
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
if strategy.is_ledger_tx:
ledger_api_dialogues = cast(
LedgerApiDialogues, self.context.ledger_api_dialogues
)
ledger_api_msg = LedgerApiMessage(
performative=LedgerApiMessage.Performative.GET_BALANCE,
dialogue_reference=ledger_api_dialogues.new_self_initiated_dialogue_reference(),
ledger_id=strategy.ledger_id,
address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)),
)
ledger_api_msg.counterparty = LEDGER_API_ADDRESS
ledger_api_dialogues.update(ledger_api_msg)
self.context.outbox.put_message(message=ledger_api_msg)
self._register_agent()
self._register_service()
def act(self) -> None:
"""
Implement the act.
:return: None
"""
# self._unregister_service()
# self._register_service()
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
self._unregister_service()
self._unregister_agent()
def _register_agent(self) -> None:
"""
Register the agent's location.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("registering agent on SOEF.")
def _register_service(self) -> None:
"""
Register the agent's service.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_register_service_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("registering service on SOEF.")
def _unregister_service(self) -> None:
"""
Unregister service from the SOEF.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_unregister_service_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("unregistering service from SOEF.")
def _unregister_agent(self) -> None:
"""
Unregister agent from the SOEF.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("unregistering agent from SOEF.")
| 38.086705 | 96 | 0.670815 | [
"Apache-2.0"
] | ejfitzgerald/agents-aea | packages/fetchai/skills/generic_seller/behaviours.py | 6,589 | Python |
"""Convert MUSDB18 dataset to .wav format.
Output .wav files contain 5 channels
- `0` - The mixture,
- `1` - The drums,
- `2` - The bass,
- `3` - The rest of the accompaniment,
- `4` - The vocals.
"""
import argparse
import os
import subprocess
import tempfile
import librosa
import numpy as np
import soundfile as sf
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('origin_dataset_dir',
help='Path of the original dataset (.mp4)',
type=str)
parser.add_argument('new_dataset_dir',
help='Output path of .wav dataset',
type=str)
parser.add_argument('--sr',
help='Sample rate. (Default: 22050) ',
type=int, default=22050)
args = parser.parse_args()
origin_dataset_dir = args.origin_dataset_dir
new_dataset_dir = args.new_dataset_dir
if os.path.isdir(new_dataset_dir):
raise FileExistsError(f'{new_dataset_dir} already exists.')
else:
os.mkdir(new_dataset_dir)
os.mkdir(os.path.join(new_dataset_dir, 'train'))
os.mkdir(os.path.join(new_dataset_dir, 'test'))
with tempfile.TemporaryDirectory() as tmpdir:
for subdir in ('train', 'test'):
origin_dir = os.path.join(origin_dataset_dir, subdir)
files = [f for f in os.listdir(origin_dir)
if os.path.splitext(f)[1] == '.mp4']
for file in files:
path = os.path.join(origin_dir, file)
name = os.path.splitext(file)[0]
wav_data = []
                # Extract & save the sound of channel `ch` to a temp directory,
                # then stack all channels into a single multi-channel .wav file
for ch in range(5):
temp_fn = f'{name}.{ch}.wav'
out_path = os.path.join(tmpdir, temp_fn)
subprocess.run(['ffmpeg', '-i', path,
'-map', f'0:{ch}', out_path])
sound, _ = librosa.load(out_path, sr=args.sr, mono=True)
wav_data.append(sound)
wav_data = np.stack(wav_data, axis=1)
out_path = os.path.join(
new_dataset_dir, subdir, f'{name}.wav')
sf.write(out_path, wav_data, args.sr)
if __name__ == '__main__':
main()
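# Example invocation (the dataset paths are illustrative):
#   python convert_to_wav.py /data/musdb18 /data/musdb18_wav --sr 44100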
| 35.202899 | 78 | 0.558666 | [
"MIT"
] | mori97/U-Net_MUSDB18 | src/convert_to_wav.py | 2,429 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# RobotPy WPILib documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 2 21:31:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
# Imports
#
import sys
import os
from os.path import abspath, join, dirname
sys.path.insert(0, abspath(join(dirname(__file__))))
# -- RTD configuration ------------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# This is used for linking and such so we link to the thing we're building
rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest")
if rtd_version not in ["stable", "latest"]:
rtd_version = "stable"
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_inline_tabs",
"sphinxext.opengraph",
"sphinx_reredirects",
]
ogp_custom_meta_tags = [
'<meta property="og:ignore_canonical" content="true" />',
'<meta name="theme-color" content="#3393d5" />',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "RobotPy"
copyright = "2014-2020, RobotPy development team"
intersphinx_mapping = {
"commandsv1": (
"https://robotpy.readthedocs.io/projects/commands-v1/en/%s/" % rtd_version,
None,
),
"commandsv2": (
"https://robotpy.readthedocs.io/projects/commands-v2/en/%s/" % rtd_version,
None,
),
"pyfrc": (
"https://robotpy.readthedocs.io/projects/pyfrc/en/%s/" % rtd_version,
None,
),
"networktables": (
"https://robotpy.readthedocs.io/projects/pynetworktables/en/%s/" % rtd_version,
None,
),
"wpilib": (
"https://robotpy.readthedocs.io/projects/wpilib/en/%s/" % rtd_version,
None,
),
"hal": (
"https://robotpy.readthedocs.io/projects/hal/en/%s/" % rtd_version,
None,
),
"robotpy_ext": (
"https://robotpy.readthedocs.io/projects/utilities/en/%s/" % rtd_version,
None,
),
"cscore": (
"https://robotpy.readthedocs.io/projects/cscore/en/%s/" % rtd_version,
None,
),
"frc": ("https://docs.wpilib.org/en/stable", None),
}
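# With these mappings, the reST sources can cross-reference the external
# projects directly; an illustrative (hypothetical) reference such as
# ``:class:`wpilib.TimedRobot``` or ``:mod:`networktables``` resolves against
# the versioned documentation selected by ``rtd_version`` above.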
redirects = {
"2020_notes": "upgrade_notes.html"
}
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "2021"
# The full version, including alpha/beta/rc tags.
release = version
autoclass_content = "both"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Output file base name for HTML help builder.
htmlhelp_basename = "RobotPy"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"RobotPy.tex",
"RobotPy Documentation",
"RobotPy development team",
"manual",
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"RobotPy",
"RobotPy Documentation",
"RobotPy development team",
"RobotPy",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "RobotPy"
epub_author = "RobotPy development team"
epub_publisher = "RobotPy development team"
epub_copyright = "2014-2020, RobotPy development team"
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Custom Document processing ----------------------------------------------
from robotpy_sphinx.sidebar import generate_sidebar
generate_sidebar(
globals(),
"robotpy",
"https://raw.githubusercontent.com/robotpy/docs-sidebar/master/sidebar.toml",
)
| 28.079602 | 98 | 0.644401 | [
"Apache-2.0"
] | KenwoodFox/robotpy-docs | conf.py | 5,644 | Python |
# Space: O(C(n, k) * k) - every k-length combination is precomputed and stored
# Time:  O(C(n, k) * k) to build the list; next()/hasNext() are then O(1)
class CombinationIterator:
def __init__(self, characters: str, combinationLength: int):
self.data = characters
self.res = self.combine(self.data, combinationLength)
self.counter = 0
self.res_count = len(self.res)
def next(self) -> str:
if self.hasNext():
res = self.res[self.counter]
self.counter += 1
return res
def hasNext(self) -> bool:
return self.counter < self.res_count
def combine(self, data, length):
if length > len(data): return []
def dfs(data, index, temp_res, res, length):
if len(temp_res) == length:
res.append(temp_res)
return
for i in range(index, len(data)):
temp_res += data[i]
dfs(data, i + 1, temp_res, res, length)
temp_res = temp_res[:-1]
return res
return dfs(data, 0, '', [], length)
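
if __name__ == "__main__":
    # Usage sketch (illustration only, not part of the LeetCode submission):
    # iterate every 2-character combination of "abc" in lexicographical order.
    it = CombinationIterator("abc", 2)
    while it.hasNext():
        print(it.next())  # prints "ab", "ac", "bc"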
| 23.619048 | 64 | 0.529234 | [
"MIT"
] | lht19900714/Leetcode_Python | Algorithms/1286_Iterator_for_Combination/Python/Iterator_for_Combination_Solution_1.py | 992 | Python |
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""Setup xlearn package."""
from __future__ import absolute_import
import sys
import os
from setuptools import setup, find_packages
sys.path.insert(0, '.')
CURRENT_DIR = os.path.dirname(__file__)
libpath_py = os.path.join(CURRENT_DIR, 'xlearn/libpath.py')
libpath = {'__file__': libpath_py}
exec(compile(open(libpath_py, "rb").read(), libpath_py, 'exec'), libpath, libpath)
LIB_PATH = [os.path.relpath(libfile, CURRENT_DIR) for libfile in libpath['find_lib_path']()]
print("Install libxlearn_api from: %s" % LIB_PATH)
setup(name='xlearn',
version=open(os.path.join(CURRENT_DIR, 'xlearn/VERSION')).read().strip(),
description="xLearn Python Package",
maintainer='Chao Ma',
maintainer_email='[email protected]',
zip_safe=False,
packages=find_packages(),
# this will use MANIFEST.in during install where we specify additional files,
# this is the golden line
include_package_data=True,
install_requires=[
"numpy",
"scipy"
],
data_files=[('xlearn', LIB_PATH)],
license='Apache-2.0',
classifiers=['License :: OSI Approved :: Apache Software License'],
url='https://github.com/aksnzhy/xlearn') | 37.285714 | 92 | 0.708265 | [
"Apache-2.0"
] | ccgcyber/xlearn | python-package/setup.py | 1,827 | Python |
# Copyright (c) 2015 Brian Haskin Jr.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import os.path
import socket
import unittest
from pyrimaa import aei, board
from pyrimaa.aei import EngineController, EngineException, EngineResponse
class MockEngine:
def __init__(self, expected):
self.log = None
self.expected = expected
self.event = 0
self._closed = False
def is_running(self):
return False if self._closed else True
def send(self, msg):
if self._closed:
raise Exception("Mock engine send called after cleanup.")
expected = self.expected[self.event]
if expected[0] == "raise":
self.event += 1
raise expected[1]
if expected[0] != "s":
raise Exception("Mock engine send called when expecting, %s" %
(expected, ))
if msg.rstrip() != expected[1]:
raise Exception(
"Mock engine send called with unexpected message (%s) expected (%s)."
% (msg, expected[1]))
self.event += 1
def readline(self, timeout=None):
if self._closed:
raise Exception("Mock engine readline called after cleanup.")
expected = self.expected[self.event]
if expected[0] != "r":
raise Exception("Mock engine readline called when expecting, %s" %
(expected[1], ))
self.event += 1
return expected[1]
def waitfor(self, msg, timeout=0.5):
if self._closed:
raise Exception("Mock engine waitfor called after cleanup.")
msg = msg.rstrip()
expected = self.expected[self.event]
if expected[0] not in ["r", "raise"]:
raise Exception("Mock engine waitfor called when expecting, %s" %
(expected, ))
responses = []
while expected[0] == "r" and expected[1] != msg:
responses.append(expected[1])
self.event += 1
expected = self.expected[self.event]
if expected[0] == "r" and msg == expected[1]:
responses.append(expected[1])
elif expected[0] == "send_response":
pass
elif expected[0] == "raise":
self.event += 1
raise expected[1]()
else:
raise Exception(
"Mock engine waitfor called with unexpected message (%s)" %
(msg, ))
self.event += 1
return responses
def cleanup(self):
if self._closed:
raise Exception("Mock engine cleanup called multiple times.")
self._closed = True
class MockLog:
def __init__(self):
self.debugging = ""
self.information = ""
self.warning = ""
def debug(self, message):
self.debugging += message + '\n'
def info(self, message):
self.information += message + '\n'
def warn(self, message):
self.warning += message + '\n'
protocol0 = [
("s", "aei"),
("r", "id name Mock0"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s",
"setposition w [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
]
bad_protocol = [
("s", "aei"),
("r", "protocol-version abc"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s",
"setposition g [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
("s", "go"),
("s", "stop"),
("s", "quit"),
]
protocol1 = [
("s", "aei"),
("r", "protocol-version 1"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "log Engine running"),
("r", "readyok"),
("r", ""),
("r", "log Engine initialized"),
("s", "setoption name depth value 4"),
("s", "newgame"),
("s",
"setposition g [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
("s", "go"),
("s", "stop"),
("r", "info depth 4"),
("r", "bestmove Hb2n Ed2n"),
("s", "makemove Hb2n Ed2n"),
("s", "go ponder"),
("s", "quit"),
]
bad_isready_response = [
("s", "aei"),
("r", "protocol-version 1"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s", "isready"),
("r", "log Engine shutting down"),
("send_response",),
]
aeiok_timeout = [
("s", "aei"),
("raise", socket.timeout),
]
aei_send_error = [
("raise", IOError),
]
class EngineControllerTest(unittest.TestCase):
def test_protocol_versions(self):
eng = MockEngine(protocol0)
ctl = EngineController(eng)
self.assertEqual(ctl.ident["name"], "Mock0")
self.assertEqual(ctl.ident["author"], "Janzert")
self.assertEqual(ctl.protocol_version, 0)
ctl.newgame()
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.setposition(pos)
ctl.cleanup()
# bad protocol version
eng = MockEngine(bad_protocol)
eng.log = MockLog()
ctl = EngineController(eng)
self.assertIn("Unrecognized protocol version", eng.log.warning)
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.newgame()
ctl.setposition(pos)
ctl.go()
ctl.stop()
ctl.quit()
def test_controller(self):
eng = MockEngine(protocol1)
ctl = EngineController(eng)
self.assertEqual(ctl.ident["name"], "Mock")
self.assertEqual(ctl.ident["author"], "Janzert")
self.assertEqual(ctl.protocol_version, 1)
self.assertEqual(ctl.is_running(), True)
self.assertRaises(socket.timeout, ctl.get_response)
resp = ctl.get_response()
self.assertIsInstance(resp, EngineResponse)
self.assertEqual(resp.type, "log")
self.assertEqual(resp.message,
eng.expected[eng.event - 1][1].lstrip("log "))
ctl.setoption("depth", 4)
ctl.newgame()
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.setposition(pos)
ctl.go()
ctl.stop()
resp = ctl.get_response()
self.assertEqual(resp.type, "info")
self.assertEqual(resp.message,
eng.expected[eng.event - 1][1].lstrip("info "))
resp = ctl.get_response()
self.assertEqual(resp.type, "bestmove")
self.assertEqual(resp.move,
eng.expected[eng.event - 1][1].lstrip("bestmove "))
ctl.makemove("Hb2n Ed2n")
ctl.go("ponder")
ctl.quit()
ctl.cleanup()
# bad response to isready
eng = MockEngine(bad_isready_response)
ctl = EngineController(eng)
ctl.newgame()
self.assertRaises(EngineException, ctl.isready)
# timeout waiting for aeiok
eng = MockEngine(aeiok_timeout)
self.assertRaises(EngineException, EngineController, eng)
# IOError sending aei
eng = MockEngine(aei_send_error)
self.assertRaises(EngineException, EngineController, eng)
def _check_engine(self, eng):
self.assertEqual(eng.is_running(), True)
eng.send("aei\n")
response = eng.waitfor("aeiok")
self.assertEqual(response[-1], "aeiok")
self.assertRaises(socket.timeout, eng.readline, timeout=0.05)
eng.send("isready\n")
response = eng.readline()
self.assertEqual(response, "readyok")
eng.send("quit\n")
eng.waitfor("log")
self.assertRaises(EngineException, eng.waitfor, "invalid", timeout=0.05)
eng.cleanup()
self.assertEqual(eng.active, False)
def test_stdioengine(self):
eng = aei.get_engine("stdio", "simple_engine")
self.assertIsInstance(eng, aei.StdioEngine)
self._check_engine(eng)
eng = aei.get_engine("stdio", "simple_engine", "aei")
self._check_engine(eng)
def test_socketengine(self):
path = os.path.dirname(__file__)
adapter = os.path.join(path, "socketadapter.py")
eng = aei.get_engine("socket", adapter)
self.assertIsInstance(eng, aei.SocketEngine)
self._check_engine(eng)
eng = aei.get_engine("socket", adapter, "aei")
self.assertIsInstance(eng, aei.SocketEngine)
self._check_engine(eng)
eng = aei.get_engine("2008cc", adapter + " --legacy")
self._check_engine(eng)
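# Usage note (illustration only): the suite runs under standard unittest
# discovery, e.g. `python -m unittest pyrimaa.tests.test_aei`; the socket tests
# additionally assume the bundled socketadapter.py and the `simple_engine`
# command are reachable from the working directory / PATH.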
| 33.213058 | 87 | 0.58148 | [
"MIT"
] | TFiFiE/AEI | pyrimaa/tests/test_aei.py | 9,665 | Python |
#!/usr/bin/env python
import numpy as np
import datetime as dt
import sys, os, pickle, time
from keras.models import Model, save_model, load_model
from keras.regularizers import l2
from keras.optimizers import SGD, Adam
import keras.backend as K
import tensorflow as tf
import pandas as pd
import innvestigate
import innvestigate.utils as iutils
from ml_functions import read_csv_files, normalize_multivariate_data, log, get_features
def brier_score_keras(obs, preds):
return K.mean((preds - obs) ** 2)
def brier_skill_score_keras(obs, preds):
climo = K.mean((obs - K.mean(obs)) ** 2)
bs = brier_score_keras(obs, preds)
    ratio = (bs / climo)
    # Brier skill score convention: 1 is perfect, 0 is no skill over climatology.
    # (The original returned `climo`, the climatological variance, which is not a
    # skill score; return 1 - BS/BS_climo instead.)
    return 1.0 - ratio
def auc(obs, preds):
auc = tf.metrics.auc(obs, preds)[1]
K.get_session().run(tf.local_variables_initializer())
return auc
def log(msg):
print( time.ctime(time.time()), msg )
### NEURAL NETWORK PARAMETERS ###
nn_params = { 'num_layers': 1, 'num_neurons': [ 1024 ], 'dropout': 0.1, 'lr': 0.001, 'num_epochs': 30, \
'report_window_space':[ int(sys.argv[1]) ], 'report_window_time':[ int(sys.argv[2]) ] }
dataset = 'RT2020'
scaling_dataset = 'NSC3km-12sec'
scaling_file = '/glade/work/sobash/NSC_objects/scaling_values_all_%s.pk'%scaling_dataset
trained_models_dir = '/glade/work/sobash/NSC_objects/trained_models_paper'
sdate = dt.datetime(2020,5,1,0,0,0)
edate = dt.datetime(2020,5,10,0,0,0)
dateinc = dt.timedelta(days=1)
features = get_features('basic')
log('Reading Data')
# read data and reassign data types to float32 to save memory
type_dict = {}
for f in features: type_dict[f]='float32'
df, numfcsts = read_csv_files(sdate, edate, dataset)
print(numfcsts)
scaling_values = pickle.load(open(scaling_file, 'rb'))
norm_in_data, scaling_values = normalize_multivariate_data(df[features].values.astype(np.float32), features, scaling_values=scaling_values)
dense_model = None
model_fname = '%s/neural_network_2016_120km_2hr_nn%d_drop%.1f_basic.h5'%(trained_models_dir,nn_params['num_neurons'][0],nn_params['dropout'])
dense_model = load_model(model_fname, custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })
print(norm_in_data.shape)
analyzer = innvestigate.create_analyzer('lrp.alpha_2_beta_1', dense_model, neuron_selection_mode='index')
a = analyzer.analyze(norm_in_data, 0)
a /= np.max(np.abs(a))
a = a.reshape((36,1298,-1))
a = np.mean(a[24,:,:], axis=0)
print(a.shape)
for i,f in enumerate(features):
print(f, a[i])
log('Finished')
| 31.8 | 157 | 0.737421 | [
"MIT"
] | ahijevyc/NSC_objects | neural_network_lrp.py | 2,544 | Python |
#!/usr/bin/env python3
import torch
import torch.optim as optim
import os, sys
import warnings
import numpy as np
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, os.pardir, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
from common.fast_rl.common.utils import EarlyStopping
from common.environments import get_data
from codes.f_utils import common_utils
from common.environments import TimeUnit, TradeEnvironmentType, Action
from common.environments import UpbitEnvironment
from common.environments import EpsilonGreedyTradeDQNActionSelector, \
ArgmaxTradeActionSelector, RandomTradeDQNActionSelector
from common.fast_rl import rl_agent, value_based_model, actions, experience_single, replay_buffer
from common.fast_rl.common import utils
from common.fast_rl.common import statistics
from rl_main.trade_main import visualizer
from common.slack import PushSlack
pusher = PushSlack()
##### NOTE #####
from codes.a_config.parameters import PARAMETERS as params
##### NOTE #####
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MODEL_SAVE_DIR = os.path.join(PROJECT_HOME, "out", "model_save_files")
if not os.path.exists(MODEL_SAVE_DIR):
os.makedirs(MODEL_SAVE_DIR)
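
# NOTE (added sketch, assumption): train() below guards an `epsilon_tracker.udpate(step_idx)`
# call, but no tracker is constructed in this file (the original helper presumably lives in
# common.fast_rl.actions). A minimal linear-decay stand-in could look like this; it is
# deliberately NOT wired into train() here.
class LinearEpsilonTracker:
    def __init__(self, selector, eps_start, eps_final, eps_frames):
        self.selector = selector      # an action selector exposing an `.epsilon` attribute
        self.eps_start = eps_start
        self.eps_final = eps_final
        self.eps_frames = eps_frames

    def udpate(self, frame_idx):      # name mirrors the call site in train()
        frac = min(frame_idx / float(self.eps_frames), 1.0)
        self.selector.epsilon = self.eps_start + frac * (self.eps_final - self.eps_start)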
def evaluate(env, agent, verbose=True):
experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(env, agent, gamma=params.GAMMA, n_step=params.N_STEP)
done = False
state = env.reset()
agent_state = agent.initial_agent_state()
episode_reward = 0.0
num_buys = 0
info = None
step_idx = 0
while not done:
step_idx += 1
states_input = []
processed_state = experience_source.get_processed_state(state)
states_input.append(processed_state)
agent_states_input = []
agent_states_input.append(agent_state)
new_actions, new_agent_states = agent(states_input, agent_states_input)
agent_state = new_agent_states[0]
action = new_actions[0]
if action == Action.MARKET_BUY.value:
num_buys += 1
if num_buys > 10:
action_str = "BUY({0})".format(10)
else:
action_str = "BUY({0})".format(num_buys)
else:
action_str = env.get_action_meanings()[action]
msg = "[{0:2}|{1}] OHLCV: {2}, {3}, {4}, {5}, {6:<10.1f}, Action: {7:7} --> ".format(
step_idx,
env.data.iloc[env.transaction_state_idx]['datetime_krw'],
env.data.iloc[env.transaction_state_idx]['open'],
env.data.iloc[env.transaction_state_idx]['high'],
env.data.iloc[env.transaction_state_idx]['low'],
env.data.iloc[env.transaction_state_idx]['final'],
env.data.iloc[env.transaction_state_idx]['volume'],
action_str
)
next_state, reward, done, info = env.step(action)
if action in [Action.HOLD.value]:
msg += "Reward: {0:.3f}, hold coin: {1:.1f}".format(
reward, info["hold_coin"]
)
elif action == Action.MARKET_BUY.value:
if num_buys <= 10:
coin_krw_str = "{0:.1f}".format(info['coin_krw'])
commission_fee_str = "{0:.1f}".format(info['commission_fee'])
else:
coin_krw_str = "-"
commission_fee_str = "-"
msg += "Reward: {0:.3f}, slippage: {1:.1f}, coin_unit_price: {2:.1f}, " \
"coin_krw: {3}, commission: {4}, hold coin: {5:.1f}".format(
reward, info["slippage"], info["coin_unit_price"],
coin_krw_str, commission_fee_str, info["hold_coin"]
)
elif action == Action.MARKET_SELL.value:
msg += "Reward: {0:.3f}, slippage: {1:.1f}, coin_unit_price: {2:.1f}, " \
"coin_krw: {3:.1f}, commission: {4:.1f}, sold coin: {5:.1f}, profit: {6:.1f}".format(
reward, info["slippage"], info["coin_unit_price"],
info['coin_krw'], info['commission_fee'], info["sold_coin"], info["profit"]
)
else:
raise ValueError()
if verbose:
print(msg)
episode_reward += reward
state = next_state
if verbose:
print("SAMPLED TRANSACTION DONE! - START DATETIME: {0}, EPISODE REWARD: {1:>8.3f}, "
"PROFIT: {2:>10.1f}, STEPS: {3}".format(
env.transaction_start_datetime, episode_reward, info["profit"], step_idx
))
return info["profit"], step_idx
def train(coin_name, time_unit, train_env, evaluate_env):
common_utils.print_fast_rl_params(params)
params.BATCH_SIZE *= params.TRAIN_STEP_FREQ
net = value_based_model.DuelingDQNSmallCNN(
observation_shape=train_env.observation_space.shape,
n_actions=train_env.action_space.n
).to(device)
print(net)
print("ACTION MEANING: {0}".format(train_env.get_action_meanings()))
tgt_net = value_based_model.DuelingDQNSmallCNN(
observation_shape=train_env.observation_space.shape,
n_actions=train_env.action_space.n
).to(device)
action_selector = EpsilonGreedyTradeDQNActionSelector(epsilon=params.EPSILON_INIT, env=train_env)
agent = rl_agent.DQNAgent(dqn_model=net, action_selector=action_selector, device=device)
argmax_action_selector = ArgmaxTradeActionSelector(env=evaluate_env)
evaluate_agent = rl_agent.DQNAgent(dqn_model=net, action_selector=argmax_action_selector, device=device)
random_action_selector = RandomTradeDQNActionSelector(env=evaluate_env)
random_agent = rl_agent.DQNAgent(dqn_model=None, action_selector=random_action_selector, device=device)
experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(
train_env, agent, gamma=params.GAMMA, n_step=params.N_STEP
)
buffer = replay_buffer.ExperienceReplayBuffer(experience_source, buffer_size=params.REPLAY_BUFFER_SIZE)
optimizer = optim.Adam(net.parameters(), lr=params.LEARNING_RATE)
    step_idx = 0
    last_loss = 0.0
    # `epsilon_tracker` is referenced in the loop below but never constructed in
    # this file; default it to None so the guarded update is a no-op instead of
    # raising a NameError.
    epsilon_tracker = None
evaluate_steps = []
evaluate_dqn_total_profits = []
evaluate_random_total_profits = []
early_stopping = EarlyStopping(
patience=params.STOP_PATIENCE_COUNT,
evaluation_min_threshold=params.TRAIN_STOP_EPISODE_REWARD,
verbose=True,
delta=0.0,
model_save_dir=MODEL_SAVE_DIR,
model_save_file_prefix=params.ENVIRONMENT_ID.value + "_" + coin_name + "_" + time_unit.value,
agent=agent
)
with utils.SpeedTracker(params=params, frame=False, early_stopping=None) as reward_tracker:
while step_idx < params.MAX_GLOBAL_STEP:
step_idx += params.TRAIN_STEP_FREQ
last_entry = buffer.populate(params.TRAIN_STEP_FREQ)
if epsilon_tracker:
epsilon_tracker.udpate(step_idx)
episode_rewards = experience_source.pop_episode_reward_lst()
solved = False
if episode_rewards:
for episode_reward in episode_rewards:
reward_tracker.set_episode_reward(
episode_reward, step_idx, action_selector.epsilon, last_info=last_entry.info,
last_loss=last_loss, model=net
)
if reward_tracker.done_episodes % params.TEST_PERIOD_EPISODE == 0:
print("#" * 200)
print("[TEST START]")
evaluate(evaluate_env, evaluate_agent)
evaluate_steps.append(step_idx)
dqn_total_profit, _ = evaluate_random(
"DQN", evaluate_env, evaluate_agent, num_episodes=100
)
evaluate_dqn_total_profits.append(dqn_total_profit)
random_total_profit, _ = evaluate_random(
"RANDOM", evaluate_env, random_agent, num_episodes=100
)
evaluate_random_total_profits.append(random_total_profit)
solved = early_stopping(dqn_total_profit, step_idx=step_idx)
visualizer.draw_performance(
evaluate_steps,
evaluate_dqn_total_profits,
evaluate_random_total_profits
)
print("[TEST END]")
print("#" * 200)
if solved:
break
if solved:
break
optimizer.zero_grad()
batch = buffer.sample(params.BATCH_SIZE)
loss_v = value_based_model.calc_loss_double_dqn(batch, net, tgt_net, gamma=params.GAMMA, device=device)
loss_v.backward()
optimizer.step()
draw_loss = min(1.0, loss_v.detach().item())
last_loss = loss_v.detach().item()
if step_idx % params.TARGET_NET_SYNC_STEP_PERIOD < params.TRAIN_STEP_FREQ:
tgt_net.sync(net)
return net
def evaluate_random(agent_type, env, agent, num_episodes, verbose=True):
num_positive = 0
num_negative = 0
total_profit = 0.0
total_steps = 0
for _ in range(num_episodes):
profit, step = evaluate(env, agent, verbose=False)
if profit > 0:
num_positive += 1
else:
num_negative += 1
total_profit += profit
total_steps += step
avg_num_steps_per_episode = total_steps / num_episodes
if verbose:
print("###[{0:6}] POSITIVE: {1}/{3}, NEGATIVE: {2}/{3}, TOTAL PROFIT: {4:.1f}, AVG. STEP FOR EPISODE: {5:.1f}".format(
agent_type, num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode
))
return total_profit, avg_num_steps_per_episode
def evaluate_sequential_all(agent_type, env, agent, data_size, verbose=True):
num_positive = 0
num_negative = 0
total_profit = 0.0
total_steps = 0
num_episodes = 0
env.transaction_state_idx = 0
while True:
num_episodes += 1
profit, step = evaluate(env, agent, verbose=False)
if profit > 0:
num_positive += 1
else:
num_negative += 1
total_profit += profit
total_steps += step
if env.transaction_state_idx >= data_size - 1:
break
avg_num_steps_per_episode = total_steps / num_episodes
if verbose:
print("###[{0:6}] POSITIVE: {1}/{3}, NEGATIVE: {2}/{3}, TOTAL PROFIT: {4:.1f}, AVG. STEP FOR EPISODE: {5:.1f}".format(
agent_type, num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode
))
return num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode
def main():
coin_name = "OMG"
time_unit = TimeUnit.ONE_HOUR
train_data_info, evaluate_data_info = get_data(coin_name=coin_name, time_unit=time_unit)
print(train_data_info["first_datetime_krw"], train_data_info["last_datetime_krw"])
print(evaluate_data_info["first_datetime_krw"], evaluate_data_info["last_datetime_krw"])
train_env = UpbitEnvironment(
coin_name=coin_name,
time_unit=time_unit,
data_info=train_data_info,
environment_type=TradeEnvironmentType.TRAIN
)
evaluate_random_env = UpbitEnvironment(
coin_name=coin_name,
time_unit=time_unit,
data_info=evaluate_data_info,
environment_type=TradeEnvironmentType.TEST_RANDOM,
)
net = train(coin_name, time_unit, train_env, evaluate_random_env)
print("#### TEST SEQUENTIALLY")
evaluate_sequential_env = UpbitEnvironment(
coin_name=coin_name,
time_unit=time_unit,
data_info=evaluate_data_info,
environment_type=TradeEnvironmentType.TEST_SEQUENTIAL,
)
argmax_action_selector = ArgmaxTradeActionSelector(env=evaluate_sequential_env)
evaluate_agent = rl_agent.DQNAgent(dqn_model=net, action_selector=argmax_action_selector, device=device)
sequential_dqn_num_positives = []
sequential_dqn_num_negatives = []
sequential_dqn_num_episodes = []
sequential_dqn_num_steps_per_episode = []
sequential_dqn_total_profits = []
for _ in range(10):
num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode = evaluate_sequential_all(
"DQN", evaluate_sequential_env, evaluate_agent, data_size=len(evaluate_data_info["data"]), verbose=False
)
sequential_dqn_num_positives.append(num_positive)
sequential_dqn_num_negatives.append(num_negative)
sequential_dqn_num_episodes.append(num_episodes)
sequential_dqn_total_profits.append(total_profit)
sequential_dqn_num_steps_per_episode.append(avg_num_steps_per_episode)
dqn_msg = f"SEQUENTIAL: DQN - {np.mean(sequential_dqn_num_episodes):.1f} EPISODES - " \
f"POSITIVE: {np.mean(sequential_dqn_num_positives):.1f}, " \
f"NEGATIVE: {np.mean(sequential_dqn_num_negatives):.1f}, " \
f"AVERAGE PROFIT {np.mean(sequential_dqn_total_profits):.1f}/STD {np.std(sequential_dqn_total_profits):.1f}, " \
f"AVERAGE STEP {np.mean(sequential_dqn_num_steps_per_episode):.1f}"
print(dqn_msg)
random_action_selector = RandomTradeDQNActionSelector(env=evaluate_sequential_env)
random_agent = rl_agent.DQNAgent(dqn_model=None, action_selector=random_action_selector, device=device)
sequential_random_num_positives = []
sequential_random_num_negatives = []
sequential_random_num_episodes = []
sequential_random_num_steps_per_episode = []
sequential_random_total_profits = []
for _ in range(10):
num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode = evaluate_sequential_all(
"RANDOM", evaluate_sequential_env, random_agent, data_size=len(evaluate_data_info["data"]), verbose=False
)
sequential_random_num_positives.append(num_positive)
sequential_random_num_negatives.append(num_negative)
sequential_random_num_episodes.append(num_episodes)
sequential_random_total_profits.append(total_profit)
sequential_random_num_steps_per_episode.append(avg_num_steps_per_episode)
random_msg = f"SEQUENTIAL: RANDOM - {np.mean(sequential_random_num_episodes):.1f} EPISODES - " \
f"POSITIVE: {np.mean(sequential_random_num_positives):.1f}, " \
f"NEGATIVE: {np.mean(sequential_random_num_negatives):.1f}, " \
f"AVERAGE PROFIT {np.mean(sequential_random_total_profits):.1f}/STD {np.std(sequential_random_total_profits):.1f}, " \
f"AVERAGE STEP {np.mean(sequential_random_num_steps_per_episode):.1f}"
print(random_msg)
pusher.send_message(
"me", dqn_msg
)
pusher.send_message(
"me", random_msg
)
if __name__ == "__main__":
main() | 38.650633 | 135 | 0.658807 | [
"MIT"
] | linklab/link_rl | codes/f_main/trade_main/upbit_trade_main.py | 15,267 | Python |
import os
import warnings
warnings.filterwarnings("ignore")
shared_params = ('python CPT_STMeta_Simplify_Obj.py '
'--Dataset ChargeStation '
'--CT 6 '
'--PT 7 '
'--TT 4 '
'--GLL 1 '
'--LSTMUnits 64 '
'--GALUnits 64 '
'--GALHeads 2 '
'--DenseUnits 32 '
'--DataRange All '
'--TrainDays All '
'--TC 0.1 '
'--TD 1000 '
'--TI 500 '
'--Epoch 10000 '
'--Train False '
'--lr 2e-5 '
'--Normalize True '
'--patience 0.1 '
'--ESlength 200 '
'--BatchSize 128 '
'--Device 0 ')
if __name__ == "__main__":
    # Start by testing on the DiDi-Xian, DiDi-Chengdu, Metro-Shanghai and
    # ChargeStation-Beijing datasets, since they take relatively little time to run.
# stability test
test_times = 10
for i in range(test_times):
os.system(shared_params + '--CT 6 --PT 7 --TT 4 --City Beijing --Group Beijing'
' --K 1 --L 1 --Graph Distance-Correlation --CodeVersion ST_Sim1_%s' % i) | 33.447368 | 107 | 0.415421 | [
"MIT"
] | GRE-EXAMINATION/UCTB | Experiments/StabilityTest/Master_CS_0.py | 1,321 | Python |
# pylint: disable=redefined-outer-name
import asyncio
import time
import pytest
DEFAULT_MAX_LATENCY = 10 * 1000
@pytest.mark.asyncio
async def test_slow_server(host):
if not pytest.enable_microbatch:
pytest.skip()
A, B = 0.2, 1
data = '{"a": %s, "b": %s}' % (A, B)
time_start = time.time()
req_count = 10
tasks = tuple(
pytest.assert_request(
"POST",
f"http://{host}/echo_with_delay",
headers=(("Content-Type", "application/json"),),
data=data,
timeout=30,
assert_status=200,
assert_data=data.encode(),
)
for i in range(req_count)
)
await asyncio.gather(*tasks)
assert time.time() - time_start < 12
req_count = 100
tasks = tuple(
pytest.assert_request(
"POST",
f"http://{host}/echo_with_delay",
headers=(("Content-Type", "application/json"),),
data=data,
assert_status=lambda i: i in (200, 429),
)
for i in range(req_count)
)
await asyncio.gather(*tasks)
@pytest.mark.asyncio
async def test_fast_server(host):
if not pytest.enable_microbatch:
pytest.skip()
A, B = 0.0002, 0.01
data = '{"a": %s, "b": %s}' % (A, B)
time_start = time.time()
req_count = 500
tasks = tuple(
pytest.assert_request(
"POST",
f"http://{host}/echo_with_delay",
headers=(("Content-Type", "application/json"),),
data=data,
timeout=30,
assert_status=200,
assert_data=data.encode(),
)
for i in range(req_count)
)
await asyncio.gather(*tasks)
assert time.time() - time_start < 5
| 23.837838 | 60 | 0.544785 | [
"Apache-2.0"
] | 418sec/BentoML | tests/integration/api_server/test_microbatch.py | 1,764 | Python |
import os
import numpy as np
from scipy.stats import multivariate_normal
import inspect
from sklearn.metrics.pairwise import pairwise_distances
def sample(transition_matrix, means, covs, start_state, n_samples,
random_state):
n_states, n_features, _ = covs.shape
states = np.zeros(n_samples, dtype='int')
emissions = np.zeros((n_samples, n_features))
for i in range(n_samples):
if i == 0:
prev_state = start_state
else:
prev_state = states[i - 1]
state = random_state.choice(n_states,
p=transition_matrix[:, prev_state])
emissions[i] = random_state.multivariate_normal(
means[state], covs[state])
states[i] = state
return emissions, states
def make_data(T=20):
"""
Sample data from a HMM model and compute associated CRF potentials.
"""
random_state = np.random.RandomState(0)
d = 0.2
e = 0.1
transition_matrix = np.array([[1 - 2 * d, d, d], [1 - e, e, 0],
[1 - e, 0, e]])
means = np.array([[0, 0], [10, 0], [5, -5]])
covs = np.array([[[1, 0], [0, 1]], [[.2, 0], [0, .3]], [[2, 0], [0, 1]]])
start_state = 0
emissions, states = sample(transition_matrix,
means,
covs,
start_state,
n_samples=T,
random_state=random_state)
emission_log_likelihood = []
for mean, cov in zip(means, covs):
rv = multivariate_normal(mean, cov)
emission_log_likelihood.append(rv.logpdf(emissions)[:, np.newaxis])
emission_log_likelihood = np.concatenate(emission_log_likelihood, axis=1)
log_transition_matrix = np.log(transition_matrix)
# CRF potential from HMM model
theta = emission_log_likelihood[:, :, np.newaxis] \
+ log_transition_matrix[np.newaxis, :, :]
return states, emissions, theta
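# Shape sketch (illustration only): with the default T=20 and the 3-state,
# 2-feature HMM defined above,
#
#   states, emissions, theta = make_data()
#   # states.shape    == (20,)       hidden-state index per time step
#   # emissions.shape == (20, 2)     observed 2-d Gaussian emissions
#   # theta.shape     == (20, 3, 3)  CRF potentials (emission + transition terms)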
def make_alignment_data():
rng = np.random.RandomState(0)
m, n = 2, 2
X = rng.randn(m, 3)
Y = rng.randn(n, 3)
return pairwise_distances(X, Y) / 10
def get_data_path(fn, subfolder='data'):
"""Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
This is from skbio's code base
https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50
"""
# getouterframes returns a list of tuples: the second tuple
# contains info about the caller, and the second element is its
# filename
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
| 33.897959 | 80 | 0.606562 | [
"BSD-3-Clause"
] | VGligorijevic/deepblast | deepblast/utils.py | 3,322 | Python |
from transformers import RobertaConfig
from modeling.hf_head.modeling_roberta_parsing import RobertaForGraphPrediction
from modeling.sequence_labeling import SequenceLabeling
if __name__ == '__main__':
config = RobertaConfig(graph_head_hidden_size_mlp_arc=100, graph_head_hidden_size_mlp_rel=100, dropout_classifier=0.1)
#config.graph_head_hidden_size_mlp_arc = 100
model = RobertaForGraphPrediction(config)
SequenceLabeling(
)
# 1. GIVE IT TO PYTORCH LIGHTNING
# 2. DEFINE DATA MODULE FOR PARSING --> INPUT + LOSS: TRY TO FIT
    # 3. Prediction (recover the full graph after BPE tokenization)
breakpoint()
| 31.5 | 122 | 0.77619 | [
"Apache-2.0"
] | benjamin-mlr/lightning-language-modeling | parser.py | 630 | Python |
"""
Defines useful extended internal coordinate frames
"""
import numpy as np
import McUtils.Numputils as nput
from McUtils.Coordinerds import (
ZMatrixCoordinateSystem, CartesianCoordinateSystem, CoordinateSystemConverter,
ZMatrixCoordinates, CartesianCoordinates3D, CoordinateSet, CoordinateSystemConverters
)
from .MoleculeInterface import AbstractMolecule
__all__ = [
"MolecularZMatrixCoordinateSystem",
"MolecularCartesianCoordinateSystem"
]
__reload_hook__ = [".MoleculeInterface"]
def _get_best_axes(first_pos, axes):
"""
Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding
    :param first_pos: Cartesian position of the first embedded atom
    :type first_pos: np.ndarray
    :param axes: inertial axes to choose the embedding pair from
    :type axes: np.ndarray
    :return: the selected pair of axes, their labels, and their indices
    :rtype: (np.ndarray, list, tuple)
"""
if axes.ndim > 2:
axes = axes[..., (0, 1), :]
ax_choice = (0, 1)
ax_names = ["A", "B"]
else:
fp_norm = np.linalg.norm(first_pos)
if fp_norm > 1.0e-10: # not chilling at the origin...
first_pos = first_pos / fp_norm
# check if it lies along an axis or is perpendicular to an axis
a_proj = np.dot(first_pos, axes[0])
b_proj = np.dot(first_pos, axes[1])
c_proj = np.dot(first_pos, axes[2])
if np.abs(b_proj) < .05: # lies in the A/C plane
if np.abs(a_proj) > .95:
ax_choice = (1, 2)
ax_names = ["B", "C"]
else:
ax_choice = (0, 1)
ax_names = ["A", "B"]
elif np.abs(c_proj) < .05: # lies in the A/B plane
if np.abs(a_proj) > .95:
ax_choice = (1, 2)
ax_names = ["B", "C"]
else:
ax_choice = (0, 2)
ax_names = ["A", "C"]
elif np.abs(a_proj) < .05: # lies in the B/C plane
if np.abs(b_proj) > .95:
ax_choice = (0, 2)
ax_names = ["A", "C"]
else:
ax_choice = (0, 1)
ax_names = ["A", "B"]
else: # not in any of the planes so no issues
ax_choice = (0, 1)
ax_names = ["A", "B"]
else:
ax_choice = (0, 1)
ax_names = ["A", "B"]
axes = axes[ax_choice,]
return axes, ax_names, ax_choice
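# Minimal usage sketch (illustration only, synthetic data): for a first atom lying
# along the A axis of an identity set of inertial axes, the B/C pair is selected.
#
#   pair, names, choice = _get_best_axes(np.array([1.0, 0.0, 0.0]), np.eye(3))
#   # names == ["B", "C"], choice == (1, 2), pair == np.eye(3)[(1, 2), ]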
class MolecularZMatrixCoordinateSystem(ZMatrixCoordinateSystem):
"""
Mirrors the standard ZMatrix coordinate system in _almost_ all regards, but forces an embedding
"""
name = "MolecularZMatrix"
def __init__(self, molecule, converter_options=None, **opts):
"""
:param molecule:
:type molecule: AbstractMolecule
:param converter_options:
:type converter_options:
:param opts:
:type opts:
"""
self.molecule = molecule
if converter_options is None:
converter_options = opts
opts = {}
nats = len(molecule.atoms)
super().__init__(converter_options=converter_options, dimension=(nats, 3), coordinate_shape=(nats, 3), opts=opts)
self.set_embedding()
@property
def origins(self):
return self.converter_options['origins']
@property
def axes(self):
return self.converter_options['axes']
def pre_convert(self, system):
self.set_embedding()
def set_embedding(self):
molecule = self.molecule
com = molecule.center_of_mass
axes = molecule.inertial_axes
converter_options = self.converter_options
if 'ordering' in converter_options:
ordering = np.array(converter_options['ordering'], dtype=int)
ordering[0, 1] = -3; ordering[0, 2] = -1; ordering[0, 3] = -2
ordering[1, 2] = -1; ordering[1, 3] = -2
ordering[2, 3] = -2
converter_options['ordering'] = ordering
first = ordering[0, 0]
else:
first = 0
first_pos = molecule.coords[first]
axes, ax_names, ax_choice = _get_best_axes(first_pos, axes)
converter_options['origins'] = com
converter_options['axes'] = axes
converter_options['axes_labels'] = ax_names
converter_options['axes_choice'] = ax_choice
converter_options['molecule'] = molecule
def jacobian(self,
*args,
reembed=None,
strip_dummies=None,
converter_options=None,
**kwargs
):
if converter_options is None:
converter_options = {}
merged_convert_options = dict(self.converter_options, **converter_options)
try:
remb = merged_convert_options['reembed'] if reembed is None else reembed
except KeyError:
remb = None
try:
strip_dummies = merged_convert_options['strip_dummies'] if strip_dummies is None else strip_dummies
except KeyError:
strip_dummies = False
if strip_dummies:
dummies = self.molecule.dummy_positions
else:
dummies = None
if dummies is not None:
main_excludes = np.setdiff1d(
np.arange(self.molecule.num_atoms),
dummies
)
try:
self.converter_options['reembed'] = True if remb is None else remb
jacs = super().jacobian(*args, converter_options=converter_options, **kwargs)
raw_jacs = []
for j in jacs:
ext_dim = j.ndim - 2
shp = sum(
((j.shape[i] // 3, 3) for i in range(ext_dim)),
()
) + j.shape[-2:]
j = j.reshape(shp)
if dummies is not None:
for i in range(ext_dim):
j = np.take(j, main_excludes, axis=2*i)
# j.shape[:i]
# + (j.shape[i] // 3, 3)
# + j.shape[i+1:]
# )
raw_jacs.append(j)
jacs = raw_jacs
return jacs
finally:
if remb is not None:
self.converter_options['reembed'] = remb
class MolecularCartesianCoordinateSystem(CartesianCoordinateSystem):
"""
Mirrors the standard Cartesian coordinate system in _almost_ all regards, but forces an embedding
"""
name= "MolecularCartesians"
def __init__(self, molecule, converter_options=None, **opts):
"""
:param molecule:
:type molecule: AbstractMolecule
:param converter_options:
:type converter_options:
:param opts:
:type opts:
"""
self.molecule = molecule #type: AbstractMolecule
nats = len(self.molecule.atoms)
if converter_options is None:
converter_options = opts
opts = {}
super().__init__(converter_options=converter_options, dimension=(nats, 3), opts=opts)
def pre_convert(self, system):
self.set_embedding()
def set_embedding(self):
"""
Sets up the embedding options...
:return:
:rtype:
"""
molecule = self.molecule
com = molecule.center_of_mass
axes = molecule.inertial_axes
converter_options = self.converter_options
if 'ordering' in converter_options:
ordering = np.array(converter_options['ordering'], dtype=int)
ordering[0, 1] = -3; ordering[0, 2] = -2; ordering[0, 3] = -1
ordering[1, 2] = -1; ordering[1, 3] = -2
ordering[2, 3] = -2
converter_options['ordering'] = ordering
first = ordering[0, 0]
else:
first = 0
first_pos = molecule.coords[first]
axes, ax_names, ax_choice = _get_best_axes(first_pos, axes)
converter_options['origins'] = com
converter_options['axes'] = axes
converter_options['axes_labels'] = ax_names
converter_options['axes_choice'] = ax_choice
converter_options['molecule'] = molecule
def jacobian(self,
coords,
system,
strip_dummies=None,
converter_options=None,
analytic_deriv_order=None,
**kwargs
):
if converter_options is None:
converter_options = {}
merged_convert_options = dict(self.converter_options, **converter_options)
try:
strip_dummies = merged_convert_options['strip_dummies'] if strip_dummies is None else strip_dummies
except KeyError:
strip_dummies = False
try:
analytic_deriv_order = merged_convert_options['analytic_deriv_order'] if analytic_deriv_order is None else analytic_deriv_order
except KeyError:
analytic_deriv_order = 0
if strip_dummies:
dummies = self.molecule.dummy_positions
if len(dummies) == 0:
dummies = None
else:
dummies = None
if dummies is not None:
main_excludes = np.setdiff1d(
np.arange(self.molecule.num_atoms),
dummies
)
else:
main_excludes = None
jacs = super().jacobian(coords, system, analytic_deriv_order=analytic_deriv_order, converter_options=converter_options, **kwargs)
raw_jacs = []
for n,j in enumerate(jacs): # this expects a full filling of the jacobians which maybe I need to not expect...
baseline = 2*analytic_deriv_order + len(coords.shape)
ext_dim = j.ndim - baseline
shp = sum(
((j.shape[i] // 3, 3) for i in range(ext_dim)),
()
) + j.shape[-baseline:]
j = j.reshape(shp)
if dummies is not None:
for i in range(ext_dim):
j = np.take(j, main_excludes, axis=2*i)
for i in range(analytic_deriv_order):
j = np.take(j, main_excludes, axis=-2*(i+2))
if len(coords.shape) > 2:
j = np.moveaxis(j, -3, 0)
raw_jacs.append(j)
jacs = raw_jacs
return jacs
class MolecularCartesianToZMatrixConverter(CoordinateSystemConverter):
"""
    Converts from the embedded molecular Cartesian frame to the molecular Z-matrix frame
"""
types = (MolecularCartesianCoordinateSystem, MolecularZMatrixCoordinateSystem)
def convert(self, coords, molecule=None, origins=None, axes=None, ordering=None, **kwargs):
"""
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords:
:type coords: CoordinateSet
:param molecule:
:type molecule:
:param origins:
:type origins:
:param axes:
:type axes:
:param ordering:
:type ordering:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
zmcs, opts = self.convert_many(np.array([coords]),
molecule=molecule, origins=origins, axes=axes, ordering=ordering, **kwargs)
zmcs = zmcs[0]
if 'derivs' in opts:
derivs = opts['derivs']
reshaped_derivs = [None] * len(derivs)
for i, v in enumerate(derivs):
reshaped_derivs[i] = v[0]
opts['derivs'] = reshaped_derivs
return zmcs, opts
def convert_many(self, coords,
molecule=None,
origins=None, axes=None,
ordering=None,
strip_embedding=True,
strip_dummies=False,
**kwargs):
"""
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords: coordinates in Cartesians to convert
:type coords: np.ndarray
:param molecule:
:type molecule: AbstractMolecule
:param origins: the origin for each individual structure
:type origins: np.ndarray
:param axes: the axes for each structure
:type axes: np.ndarray
:param ordering: the Z-matrix ordering spec
:type ordering:
:param strip_embedding: whether to strip the embedding coordinates
:type strip_embedding:
:param strip_dummies: whether to strip all dummy coordinates
:type strip_dummies:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
# we add three dummy atoms at the origins and along the axes before doing the conversion
if origins.ndim == 1:
origins = np.broadcast_to(origins[np.newaxis, np.newaxis], (n_sys, 1, 3))
elif origins.ndim == 2:
origins = origins[:, np.newaxis, :]
if axes.ndim == 2:
axes = np.broadcast_to(axes[np.newaxis], (n_sys, 2, 3))
if origins.shape[0] != n_sys:
if n_sys % origins.shape[0] != 0:
raise ValueError("inconsistent shapes; origins shape {} but coords shape {}".format(
origins.shape,
coords.shape
))
num_coords = n_sys // origins.shape[0]
origins = np.broadcast_to(origins[:, np.newaxis, :, :], (origins.shape[0], num_coords) + origins.shape[1:])
origins = origins.reshape((n_sys,) + origins.shape[2:])
if axes.shape[0] != n_sys:
if n_sys % axes.shape[0] != 0:
raise ValueError("inconsistent shapes; axes shape {} but coords shape {}".format(
axes.shape,
coords.shape
))
num_coords = n_sys // axes.shape[0]
axes = np.broadcast_to(axes[:, np.newaxis, :, :], (axes.shape[0], num_coords) + axes.shape[1:])
axes = axes.reshape((n_sys,) + axes.shape[2:])
coords = np.concatenate([origins, origins+axes, coords], axis=1)
if ordering is not None:
ordering = np.array(ordering, dtype=int)
ordering[0, 1] = -3; ordering[0, 2] = -2; ordering[0, 3] = -1
ordering[1, 2] = -2; ordering[1, 3] = -1
ordering[2, 3] = -1
ordering = ordering + 3
ordering = np.concatenate([ [[0, -1, -1, -1], [1, 0, -1, -1], [2, 0, 1, -1]], ordering])
# print("...?", ordering)
res = CoordinateSet(coords, CartesianCoordinates3D).convert(ZMatrixCoordinates,
ordering=ordering,
origins=origins,
axes=axes,
**kwargs
)
if isinstance(res, tuple):
zmcs, opts = res
else:
zmcs = res
opts=res.converter_options
opts['ordering'] = opts['ordering'][3:] - 3
# zmcs = zmcs[:, 2:]
if strip_dummies:
dummies = [0, 1, 2] + [x+3 for x in molecule.dummy_positions] # add on axes
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if dummies is not None:
main_excludes = np.setdiff1d(
np.arange(len(molecule.atoms) + 3),
dummies
)
sub_excludes = main_excludes - 1 # drop one fewer terms to drop I think...
if 'derivs' in opts:
derivs = opts['derivs']
reshaped_derivs = [None] * len(derivs)
deriv_excludes = np.arange(3, len(molecule.atoms) + 3)
for i, v in enumerate(derivs):
# drop all terms relating to the embedding of the embedding
start_dim = v.ndim - 2*(i+2)
for j in range(start_dim, v.ndim-2, 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=-2)
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
zmcs = zmcs[..., sub_excludes, :]
# raise Exception(derivs.shape)
return zmcs, opts
MolecularCartesianToZMatrixConverter = MolecularCartesianToZMatrixConverter()
MolecularCartesianToZMatrixConverter.register(CoordinateSystemConverters)
class MolecularCartesianToRegularCartesianConverter(CoordinateSystemConverter):
"""
    Converts from the molecular Cartesian frame to plain Cartesians (an identity mapping)
"""
types = (MolecularCartesianCoordinateSystem, CartesianCoordinateSystem)
def convert(self, coords, **kw):
return coords, kw
def convert_many(self, coords, **kwargs):
"""
        Pass-through: molecular Cartesians use the same values as plain Cartesians,
        so the coordinates are returned unchanged
"""
return coords, kwargs
MolecularCartesianToRegularCartesianConverter = MolecularCartesianToRegularCartesianConverter()
MolecularCartesianToRegularCartesianConverter.register()
class MolecularZMatrixToCartesianConverter(CoordinateSystemConverter):
"""
    Converts from the molecular Z-matrix frame back to the embedded molecular Cartesian frame
"""
types = (MolecularZMatrixCoordinateSystem, MolecularCartesianCoordinateSystem)
def convert(self, coords, **kw):
total_points, opts = self.convert_many(coords[np.newaxis], **kw)
return total_points[0], opts
def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None,
reembed=False, axes_choice=None, return_derivs=None,
strip_dummies=False,
strip_embedding=True,
planar_ref_tolerance=None,
**kwargs):
"""
        Converts from Z-matrix to Cartesian coords, attempting to preserve the embedding
"""
from .Molecule import Molecule
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
if n_coords != n_atoms + 2:
# means we already added the embedding
if n_coords != n_atoms:
raise ValueError('Embedding unclear when num_coords ({}) < num_atoms ({})'.format(
n_coords,
n_atoms
))
x_ax = axes[..., 0, :]
y_ax = axes[..., 1, :]
extra_norms0 = nput.vec_norms(x_ax)
extra_norms1 = nput.vec_norms(y_ax)
extra_angles, _ = nput.vec_angles(x_ax, y_ax)
extra_coords = np.zeros((n_sys, 2, 3))
extra_coords[..., 0, 0] = extra_norms0
extra_coords[..., 1, 0] = extra_norms1
extra_coords[..., 1, 1] = extra_angles
coords = np.concatenate([extra_coords, coords], axis=-2)
if ordering is not None:
ordering = np.array(ordering, dtype=int)
ordering = ordering + 3
ordering = np.concatenate([ [[0, -1, -1, -1], [1, 0, -1, -1], [2, 0, 1, -1]], ordering])
refuse_derivs = reembed and coords.squeeze().ndim != 2
res = CoordinateSet(coords, ZMatrixCoordinates).convert(CartesianCoordinates3D,
ordering=ordering,
origins=origins,
axes=axes,
return_derivs=(return_derivs and not refuse_derivs),
**kwargs)
if isinstance(res, tuple):
carts, opts = res
else:
carts = res
opts = res.converter_options
if reembed:
if molecule is None:
raise ValueError("can't reembed without a reference structure")
embed_carts = carts[..., 3:, :]
reembed = not (
carts.squeeze().ndim == 2 and
np.allclose(molecule.coords, embed_carts, atol=1.0e-5)
) # agree to like a ten thousandth of an angstrom
if reembed:
if not return_derivs:
embed_carts = molecule.embed_coords(embed_carts, planar_ref_tolerance=planar_ref_tolerance)
carts = np.concatenate([
carts[..., :3, :],
embed_carts
],
axis=-2
)
else:
inert_coords, coord_coms, coord_axes = Molecule(molecule.atoms, embed_carts).principle_axis_data
if axes_choice is None:
axes_choice = (0, 1)
guh = self.convert_many(coords,
origins=coord_coms,
axes=coord_axes[:, axes_choice],
molecule=molecule,
reembed=False,
ordering=ordering,
return_derivs=return_derivs,
axes_choice=axes_choice,
**kwargs
)
return guh
opts['origins'] = origins
opts['axes'] = axes
if ordering is not None:
opts['ordering'] = ordering[3:] - 3
if strip_dummies:
# raise Exception("wwwwaaaaaaaaat")
dummies = [0, 1, 2] + [x + 3 for x in molecule.dummy_positions] # add on axes
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if dummies is not None:
main_excludes = np.setdiff1d(
np.arange(len(molecule.atoms) + 3),
dummies
)
sub_excludes = main_excludes - 1 # drop one fewer terms to drop I think...
if 'derivs' in opts:
derivs = opts['derivs']
reshaped_derivs = [None] * len(derivs)
deriv_excludes = np.arange(3, len(molecule.atoms) + 3)
for i, v in enumerate(derivs):
# drop all terms relating to the embedding of the embedding
start_dim = v.ndim - i
for j in range(start_dim, v.ndim, 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=-2)
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
carts = carts[..., main_excludes, :]
return carts, opts
MolecularZMatrixToCartesianConverter = MolecularZMatrixToCartesianConverter()
MolecularZMatrixToCartesianConverter.register()
class MolecularZMatrixToRegularZMatrixConverter(CoordinateSystemConverter):
"""
    Converts from the molecular Z-matrix frame to a plain Z-matrix system (an identity mapping)
"""
types = (MolecularZMatrixCoordinateSystem, ZMatrixCoordinateSystem)
def convert(self, coords, **kw):
return coords, kw
def convert_many(self, coords, **kwargs):
return coords, kwargs
MolecularZMatrixToRegularZMatrixConverter = MolecularZMatrixToRegularZMatrixConverter()
MolecularZMatrixToRegularZMatrixConverter.register()
| 38.200972 | 139 | 0.530208 | [
"MIT"
] | McCoyGroup/Coordinerds | Psience/Molecools/CoordinateSystems.py | 23,570 | Python |
# mypy: allow-untyped-defs
import os.path
from unittest.mock import patch
from tools.manifest.manifest import Manifest
from tools.wpt import testfiles
def test_getrevish_kwarg():
assert testfiles.get_revish(revish="abcdef") == "abcdef"
assert testfiles.get_revish(revish="123456\n") == "123456"
def test_getrevish_implicit():
with patch("tools.wpt.testfiles.branch_point", return_value="base"):
assert testfiles.get_revish() == "base..HEAD"
def test_affected_testfiles():
manifest_json = {
"items": {
"crashtest": {
"a": {
"b": {
"c": {
"foo-crash.html": [
"acdefgh123456",
["null", {}],
]
}
}
}
}
},
"url_base": "/",
"version": 8,
}
manifest = Manifest.from_json("/", manifest_json)
with patch("tools.wpt.testfiles.load_manifest", return_value=manifest):
# Dependent affected tests are determined by walking the filesystem,
# which doesn't work in our test setup. We would need to refactor
# testfiles.affected_testfiles or have a more complex test setup to
# support testing those.
full_test_path = os.path.join(
testfiles.wpt_root, "a", "b", "c", "foo-crash.html")
tests_changed, _ = testfiles.affected_testfiles([full_test_path])
assert tests_changed == {full_test_path}
def test_exclude_ignored():
default_ignored = [
"resources/testharness.js",
"resources/testharnessreport.js",
"resources/testdriver.js",
"resources/testdriver-vendor.js",
]
default_ignored_abs = sorted(os.path.join(testfiles.wpt_root, x) for x in default_ignored)
default_changed = [
"foo/bar.html"
]
default_changed_abs = sorted(os.path.join(testfiles.wpt_root, x) for x in default_changed)
files = default_ignored + default_changed
changed, ignored = testfiles.exclude_ignored(files, None)
assert sorted(changed) == default_changed_abs
assert sorted(ignored) == default_ignored_abs
changed, ignored = testfiles.exclude_ignored(files, [])
assert sorted(changed) == sorted(default_changed_abs + default_ignored_abs)
assert sorted(ignored) == []
| 33.666667 | 94 | 0.60396 | [
"BSD-3-Clause"
] | BasixKOR/wpt | tools/wpt/tests/test_testfiles.py | 2,424 | Python |
#!/usr/bin/env python
"""
_Exists_
Oracle implementation of JobGroup.Exists
"""
__all__ = []
from WMCore.WMBS.MySQL.JobGroup.Exists import Exists as ExistsJobGroupMySQL
class Exists(ExistsJobGroupMySQL):
pass
| 13.6875 | 75 | 0.757991 | [
"Apache-2.0"
] | JAmadoTest/WMCore | src/python/WMCore/WMBS/Oracle/JobGroup/Exists.py | 219 | Python |
from unittest import TestCase
from src.adders import HalfAdder, FullAdder, FourBitFullAdder
from tests.utils import decimal_to_boolean_list
class HalfAdderTests(TestCase):
TRUTH_TABLE = (
# A B S Cout
((False, False), (False, False)),
((False, True), (True, False)),
((True, False), (True, False)),
((True, True), (False, True)),
)
def setUp(self):
self.half_adder = HalfAdder()
def test_truth_table(self):
for test_case in self.TRUTH_TABLE:
assert self.half_adder.set_inputs(*test_case[0]) == test_case[1]
class FullAdderTests(TestCase):
TRUTH_TABLE = (
# A B Cin S Cout
((False, False, False), (False, False)),
((False, False, True), (True, False)),
((False, True, False), (True, False)),
((False, True, True), (False, True)),
((True, False, False), (True, False)),
((True, False, True), (False, True)),
((True, True, False), (False, True)),
((True, True, True), (True, True)),
)
def setUp(self):
self.full_adder = FullAdder()
def test_truth_table(self):
for test_case in self.TRUTH_TABLE:
assert self.full_adder.set_inputs(*test_case[0]) == test_case[1]
class FourBitFullAdderTests(TestCase):
def setUp(self):
self.full_adder = FourBitFullAdder()
self.TRUTH_TABLE = []
# Generate the truth table, since it is HUGE for a 4 bit adder
# Note: it will generate items like:
# (((False, True, False, False), (False, False, True, True)), (False, False, True, True, True))
# and
# (((False, True, True, False), (False, True, True, True)), (False, True, True, False, True))
# for 4 + 3 = 7 and 6 + 7 = 13, respectively
for addend_1 in range(0, 16):
for addend_2 in range(0, 16):
self.TRUTH_TABLE.append(
(
(decimal_to_boolean_list(addend_1, padding=4), decimal_to_boolean_list(addend_2, padding=4)),
decimal_to_boolean_list(addend_1 + addend_2, padding=5),
)
)
def test_truth_table(self):
for test_case in self.TRUTH_TABLE:
            # Note: generate the input arguments by setting both addends and the carry-in (which is always 0, i.e. False)
inputs = (test_case[0][0], test_case[0][1], False)
assert self.full_adder.set_inputs(*inputs) == test_case[1]
# Test adding 15+15 with a carry in, which will result in 31
assert (
self.full_adder.set_inputs(
value_1=(True, True, True, True),
value_2=(True, True, True, True),
carry_in=True,
)
== (True, True, True, True, True)
)
| 35.121951 | 118 | 0.557292 | [
"MIT"
] | fgarci03/pylectronics | tests/test_adders.py | 2,880 | Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from scrapy.spiders import Spider
from scrapy.spiders import Request
import json
from hexun.items import HexunItem
from utils.urlUtils import UrlUtils
from utils.dateTimeUtils import DateTimeUtils
class PPSpider(Spider):
name = 'pp'
urlTemplate = 'http://webftcn.hermes.hexun.com/shf/minute?code=DCEpp{0}&start={1}&number=225&t=1513835351321'
start_urls = [
]
allowed_domains = ['*.hexun.com']
def start_requests(self):
contractList = DateTimeUtils.getContractList()
for contract in contractList:
url = self.urlTemplate.format(contract, DateTimeUtils.getStartTime())
yield Request(url=url, callback=self.parseItem)
def parseItem(self, response):
jsonData = json.loads(response.body_as_unicode().strip(';').strip('(').strip(')'))
datas = jsonData['Data'][0]
contractName = self.getContractName(response)
for dataItem in datas:
lldpeItem = HexunItem()
lldpeItem['product'] = contractName
lldpeItem['dateTime'] = dataItem[0]
lldpeItem['price'] = dataItem[1]
lldpeItem['amount'] = dataItem[2]
lldpeItem['volumn'] = dataItem[3]
lldpeItem['avePrice'] = dataItem[4]
lldpeItem['openInterest'] = dataItem[5]
yield lldpeItem
def getContractName(self, response):
code = UrlUtils.getQueryValue(response.url, 'code')[-4:]
return self.name + code
| 35.093023 | 113 | 0.644135 | [
"MIT"
] | judypol/pytonStudy | hexun/hexun/spiders/ppSpider.py | 1,509 | Python |
# Standard Library
import copy
import json
import re
from .log_helper import default_logger as logger
def format_cfg(cfg):
"""Format experiment config for friendly display"""
# json_str = json.dumps(cfg, indent=2, ensure_ascii=False)
# return json_str
def list2str(cfg):
for key, value in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if len(value) == 0 or isinstance(value[0], (int, float)):
cfg[key] = str(value)
else:
for i, item in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split("\n")
# json_str = [re.sub(r"(\"|,$|\{|\}|\[$|\s$)", "", line) for line in json_str if line.strip() not in "{}[]"]
json_str = [re.sub(r"(\"|(!\],$)|\s$)", "", line) for line in json_str]
cfg_str = "\n".join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
def is_number(num):
pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
res = pattern.match(num)
if res:
return True
return False
def try_decode(val):
"""bool, int, float, or str"""
if val.upper() == 'FALSE':
return False
elif val.upper() == 'TRUE':
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
def merge_opts_into_cfg(opts, cfg):
cfg = copy.deepcopy(cfg)
if opts is None or len(opts) == 0:
return cfg
assert len(opts) % 2 == 0
keys, values = opts[0::2], opts[1::2]
for key, val in zip(keys, values):
logger.info(f'replacing {key}')
val = try_decode(val)
cur_cfg = cfg
# for hooks
if '-' in key:
key_p, key_s = key.split('-')
k_module, k_type = key_p.split('.')
cur_cfg = cur_cfg[k_module]
flag_exist = False
for idx in range(len(cur_cfg)):
if cur_cfg[idx]['type'] != k_type:
continue
flag_exist = True
cur_cfg_temp = cur_cfg[idx]
key_s = key_s.split('.')
for k in key_s[:-1]:
cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
cur_cfg_temp[key_s[-1]] = val
if not flag_exist:
_cur_cfg = {}
cur_cfg_temp = _cur_cfg
key_s = key_s.split('.')
for k in key_s[:-1]:
cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
cur_cfg_temp[key_s[-1]] = val
cur_cfg.append(_cur_cfg)
else:
key = key.split('.')
for k in key[:-1]:
cur_cfg = cur_cfg.setdefault(k, {})
cur_cfg[key[-1]] = val
return cfg
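# Illustrative sketch (not part of the original module; the config layout below is
# hypothetical): plain keys are dotted paths into nested dicts, while keys containing
# '-' address a hook entry by its type inside a list-valued section.
def _example_merge_opts():
    cfg = {
        'trainer': {'lr': 0.01},
        'hooks': [{'type': 'auto_checkpoint', 'frequency': 1}],
    }
    opts = ['trainer.lr', '0.1', 'hooks.auto_checkpoint-frequency', '5']
    merged = merge_opts_into_cfg(opts, cfg)
    # merged['trainer']['lr'] == 0.1 and merged['hooks'][0]['frequency'] == 5
    return merged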
def upgrade_cfg(cfg):
# cfg = upgrade_fp16(cfg)
return cfg
| 30.901961 | 112 | 0.510152 | [
"Apache-2.0"
] | ModelTC/EOD | up/utils/general/cfg_helper.py | 3,152 | Python |
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
from time import sleep
from streamsets.testframework.markers import sdc_min_version
from streamsets.sdk.sdc_models import Metrics
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def sdc_common_hook():
def hook(data_collector):
data_collector.add_stage_lib('streamsets-datacollector-groovy_2_4-lib')
return hook
# SDC-11777: provide way to easily see where a pipeline is when it is stuck in STARTING
@sdc_min_version('3.15.0')
def test_runner_metrics_for_init_and_destroy(sdc_builder, sdc_executor):
"""Ensure that we properly update metrics when the runner is in starting phase."""
builder = sdc_builder.get_pipeline_builder()
SLEEP_SCRIPT = "sleep(5*1000)"
# Super simple cluster pipeline
source = builder.add_stage('Dev Data Generator')
groovy = builder.add_stage('Groovy Evaluator', type='processor')
groovy.init_script = SLEEP_SCRIPT
groovy.destroy_script = SLEEP_SCRIPT
groovy.script = SLEEP_SCRIPT
trash = builder.add_stage('Trash')
source >> groovy >> trash
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
# Start the pipeline, it should take at least 5 seconds (since the sleep) and we check that at least once
# we have seen the metrics we're looking for.
sdc_executor.start_pipeline(pipeline, wait=False)
count = 0
while True:
# TLKT-468: SDC object doesn't expose get_pipeline_metrics method
metrics_json = sdc_executor.api_client.get_pipeline_metrics(pipeline.id)
if metrics_json:
metrics = Metrics(metrics_json)
logger.info(f"Detected runtime gauge state {metrics.gauge('runner.0.gauge').value['state']}")
if metrics.gauge('runner.0.gauge').value['state'] == 'Starting':
count += 1
status = sdc_executor.get_pipeline_status(pipeline).response.json()
sleep(0.5)
if status.get('status') == 'RUNNING':
break
assert count > 0
sdc_executor.stop_pipeline(pipeline)
| 33.884615 | 109 | 0.72115 | [
"Apache-2.0"
] | anubandhan/datacollector-tests | pipeline/test_metrics.py | 2,643 | Python |
#!/usr/bin/env python
#
# This program shows how to use MPI_Alltoall. Each processor
# sends/receives a different random number to/from the other processors.
#
# numpy is required
import numpy
from numpy import *
# mpi4py module
from mpi4py import MPI
import sys
def myquit(mes):
MPI.Finalize()
print(mes)
sys.exit()
# Initialize MPI and print out hello
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
# We are going to send/recv a single value to/from
# each processor. Here we allocate arrays
s_vals=zeros(numprocs,"i")
r_vals=zeros(numprocs,"i")
# Fill the send arrays with random numbers
random.seed(myid)
for i in range(0, numprocs):
s_vals[i]=random.randint(1,10)
print("myid=",myid,"s_vals=",s_vals)
# Send/recv to/from all
comm.Alltoall(s_vals, r_vals)
print("myid=",myid,"r_vals=",r_vals)
MPI.Finalize()
# Note, the sent values and the recv values are
# like a transpose of each other
#
# mpiexec -n 4 ./P_ex07.py | grep s_v | sort
# myid= 0 s_vals= [6 1 4 4]
# myid= 1 s_vals= [6 9 6 1]
# myid= 2 s_vals= [9 9 7 3]
# myid= 3 s_vals= [9 4 9 9]
# mpiexec -n 4 ./P_ex07.py | grep r_v | sort
# myid= 0 r_vals= [6 6 9 9]
# myid= 1 r_vals= [1 9 9 4]
# myid= 2 r_vals= [4 6 7 9]
# myid= 3 r_vals= [4 1 3 9]
| 20.453125 | 63 | 0.675325 | [
"Unlicense"
] | timkphd/examples | array/bot/others/P_ex07.py | 1,309 | Python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''topology_context.py'''
import os
from collections import namedtuple
from heronpy.api.task_hook import (ITaskHook, EmitInfo, SpoutAckInfo,
SpoutFailInfo, BoltExecuteInfo,
BoltAckInfo, BoltFailInfo)
from heronpy.api.topology_context import TopologyContext
import heronpy.api.api_constants as api_constants
from heron.instance.src.python.utils.metrics import MetricsCollector
import heron.instance.src.python.utils.system_constants as system_constants
import heron.common.src.python.pex_loader as pex_loader
class TopologyContextImpl(TopologyContext):
"""Implemention of TopologyContext
This is created by Heron Instance and passed on to the topology spouts/bolts
as the topology context
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, config, topology, task_to_component, my_task_id, metrics_collector,
topo_pex_path):
self.config = config
self.topology = topology
self.task_to_component_map = task_to_component
self.task_id = my_task_id
self.metrics_collector = metrics_collector
self.topology_pex_path = os.path.abspath(topo_pex_path)
inputs, outputs, out_fields = self._get_inputs_and_outputs_and_outfields(topology)
self.inputs = inputs
self.outputs = outputs
self.component_to_out_fields = out_fields
# init task hooks
self.task_hooks = []
self._init_task_hooks()
##### Implementation of interface methods #####
def get_task_id(self):
"""Property to get the task id of this component"""
return self.task_id
def get_component_id(self):
"""Property to get the component id of this component"""
return self.task_to_component_map.get(self.get_task_id())
def get_cluster_config(self):
"""Returns the cluster config for this component
Note that the returned config is auto-typed map: <str -> any Python object>.
"""
return self.config
def get_topology_name(self):
"""Returns the name of the topology
"""
return str(self.topology.name)
def register_metric(self, name, metric, time_bucket_in_sec):
"""Registers a new metric to this context"""
collector = self.get_metrics_collector()
collector.register_metric(name, metric, time_bucket_in_sec)
def get_sources(self, component_id):
"""Returns the declared inputs to specified component
:return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or
None if not found
"""
# this is necessary because protobuf message is not hashable
StreamId = namedtuple('StreamId', 'id, component_name')
if component_id in self.inputs:
ret = {}
for istream in self.inputs.get(component_id):
key = StreamId(id=istream.stream.id, component_name=istream.stream.component_name)
ret[key] = istream.gtype
return ret
else:
return None
def get_this_sources(self):
return self.get_sources(self.get_component_id())
def get_component_tasks(self, component_id):
"""Returns the task ids allocated for the given component id"""
ret = []
for task_id, comp_id in self.task_to_component_map.items():
if comp_id == component_id:
ret.append(task_id)
return ret
def add_task_hook(self, task_hook):
"""Registers a specified task hook to this context
:type task_hook: heron.instance.src.python.utils.topology.ITaskHook
:param task_hook: Implementation of ITaskHook
"""
if not isinstance(task_hook, ITaskHook):
raise TypeError("In add_task_hook(): attempt to add non ITaskHook instance, given: %s"
% str(type(task_hook)))
self.task_hooks.append(task_hook)
##### Other exposed implementation specific methods #####
def get_topology_pex_path(self):
"""Returns the topology's pex file path"""
return self.topology_pex_path
def get_metrics_collector(self):
"""Returns this context's metrics collector"""
if self.metrics_collector is None or not isinstance(self.metrics_collector, MetricsCollector):
raise RuntimeError("Metrics collector is not registered in this context")
return self.metrics_collector
########################################
@classmethod
def _get_inputs_and_outputs_and_outfields(cls, topology):
inputs = {}
outputs = {}
out_fields = {}
for spout in topology.spouts:
inputs[spout.comp.name] = [] # spout doesn't have any inputs
outputs[spout.comp.name] = spout.outputs
out_fields.update(cls._get_output_to_comp_fields(spout.outputs))
for bolt in topology.bolts:
inputs[bolt.comp.name] = bolt.inputs
outputs[bolt.comp.name] = bolt.outputs
out_fields.update(cls._get_output_to_comp_fields(bolt.outputs))
return inputs, outputs, out_fields
@staticmethod
def _get_output_to_comp_fields(outputs):
out_fields = {}
for out_stream in outputs:
comp_name = out_stream.stream.component_name
stream_id = out_stream.stream.id
if comp_name not in out_fields:
out_fields[comp_name] = dict()
# get the fields of a particular output stream
ret = []
for kt in out_stream.schema.keys:
ret.append(kt.key)
out_fields[comp_name][stream_id] = tuple(ret)
return out_fields
######### Task hook related ##########
def _init_task_hooks(self):
task_hooks_cls_list = self.get_cluster_config().get(api_constants.TOPOLOGY_AUTO_TASK_HOOKS,
None)
if task_hooks_cls_list is None:
return
# load pex first
topo_pex_path = self.get_topology_pex_path()
pex_loader.load_pex(topo_pex_path)
for class_name in task_hooks_cls_list:
try:
task_hook_cls = pex_loader.import_and_get_class(topo_pex_path, class_name)
task_hook_instance = task_hook_cls()
assert isinstance(task_hook_instance, ITaskHook)
self.task_hooks.append(task_hook_instance)
except AssertionError:
raise RuntimeError("Auto-registered task hook not instance of ITaskHook")
except Exception as e:
raise RuntimeError("Error with loading task hook class: %s, with error message: %s"
% (class_name, str(e)))
def invoke_hook_prepare(self):
"""invoke task hooks for after the spout/bolt's initialize() method"""
for task_hook in self.task_hooks:
task_hook.prepare(self.get_cluster_config(), self)
def invoke_hook_cleanup(self):
"""invoke task hooks for just before the spout/bolt's cleanup method"""
for task_hook in self.task_hooks:
task_hook.clean_up()
def invoke_hook_emit(self, values, stream_id, out_tasks):
"""invoke task hooks for every time a tuple is emitted in spout/bolt
:type values: list
:param values: values emitted
:type stream_id: str
:param stream_id: stream id into which tuple is emitted
:type out_tasks: list
:param out_tasks: list of custom grouping target task id
"""
if len(self.task_hooks) > 0:
emit_info = EmitInfo(values=values, stream_id=stream_id,
task_id=self.get_task_id(), out_tasks=out_tasks)
for task_hook in self.task_hooks:
task_hook.emit(emit_info)
def invoke_hook_spout_ack(self, message_id, complete_latency_ns):
"""invoke task hooks for every time spout acks a tuple
:type message_id: str
:param message_id: message id to which an acked tuple was anchored
:type complete_latency_ns: float
:param complete_latency_ns: complete latency in nano seconds
"""
if len(self.task_hooks) > 0:
spout_ack_info = SpoutAckInfo(message_id=message_id,
spout_task_id=self.get_task_id(),
complete_latency_ms=complete_latency_ns *
system_constants.NS_TO_MS)
for task_hook in self.task_hooks:
task_hook.spout_ack(spout_ack_info)
def invoke_hook_spout_fail(self, message_id, fail_latency_ns):
"""invoke task hooks for every time spout fails a tuple
:type message_id: str
:param message_id: message id to which a failed tuple was anchored
:type fail_latency_ns: float
:param fail_latency_ns: fail latency in nano seconds
"""
if len(self.task_hooks) > 0:
spout_fail_info = SpoutFailInfo(message_id=message_id,
spout_task_id=self.get_task_id(),
fail_latency_ms=fail_latency_ns * system_constants.NS_TO_MS)
for task_hook in self.task_hooks:
task_hook.spout_fail(spout_fail_info)
def invoke_hook_bolt_execute(self, heron_tuple, execute_latency_ns):
"""invoke task hooks for every time bolt processes a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is executed
:type execute_latency_ns: float
:param execute_latency_ns: execute latency in nano seconds
"""
if len(self.task_hooks) > 0:
bolt_execute_info = \
BoltExecuteInfo(heron_tuple=heron_tuple,
executing_task_id=self.get_task_id(),
execute_latency_ms=execute_latency_ns * system_constants.NS_TO_MS)
for task_hook in self.task_hooks:
task_hook.bolt_execute(bolt_execute_info)
def invoke_hook_bolt_ack(self, heron_tuple, process_latency_ns):
"""invoke task hooks for every time bolt acks a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is acked
:type process_latency_ns: float
:param process_latency_ns: process latency in nano seconds
"""
if len(self.task_hooks) > 0:
bolt_ack_info = BoltAckInfo(heron_tuple=heron_tuple,
acking_task_id=self.get_task_id(),
process_latency_ms=process_latency_ns * system_constants.NS_TO_MS)
for task_hook in self.task_hooks:
task_hook.bolt_ack(bolt_ack_info)
def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):
"""invoke task hooks for every time bolt fails a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is failed
:type fail_latency_ns: float
:param fail_latency_ns: fail latency in nano seconds
"""
if len(self.task_hooks) > 0:
bolt_fail_info = BoltFailInfo(heron_tuple=heron_tuple,
failing_task_id=self.get_task_id(),
fail_latency_ms=fail_latency_ns * system_constants.NS_TO_MS)
for task_hook in self.task_hooks:
task_hook.bolt_fail(bolt_fail_info)
| 37.759197 | 100 | 0.692471 | [
"Apache-2.0"
] | kalimfaria/heron | heron/instance/src/python/utils/topology/topology_context_impl.py | 11,290 | Python |
'''
Created on Sep 18, 2017
@author: jschm
'''
from cs115 import map
def powerset(lst):
"""returns the power set of the list - the set of all subsets of the list"""
if lst == []:
return [[]]
#power set is a list of lists
    #this way is more efficient for getting the combinations of the characters in a list
lose_it = powerset(lst[1:])
use_it = map(lambda subset: [lst[0]] + subset, lose_it)
return lose_it + use_it
print(powerset(['a', 'b', 'c']))
def subset(target, lst):
"""determines whether or not it is possible to create target sum using the
values in the list. Values in teh list can be positive, negative, or zero."""
if target == 0:
return True
#what if target is 0?
if lst == []:
return False
#use_it = subset(target - lst[0], lst[1:])
#lose_it = subset(target, lst[1:])
"""and and or are short-cut operators in python. THe second operand is not evaluated
when the overall result can be deduced by evaluating the second operand"""
#return use_it or lose_it
return subset(target - lst[0], lst[1:]) or subset(target, lst[1:])
print(subset(5,[1,3,2,4,5]))
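# For example, subset(5, [1, 3, 2, 4, 5]) is True because 3 + 2 (or 1 + 4, or 5 alone)
# reaches the target, while subset(11, [1, 3, 2, 4]) is False because the largest
# reachable sum is 1 + 3 + 2 + 4 = 10.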
def subset_with_values(target, lst):
"""Determines whether or not it is possible to create the target sum using
values in the list. Values in the list can be positive, negative, or zero.
The function returns a tuple of exactly two items. The first is a boolean,
that indicates true if the sum is possible and false if it is not. The second
element in the tuple is a list of all values that add up to make the target sum."""
if target == 0:
return(True, [])
if lst == []:
return(False, [])
use_it = subset_with_values(target - lst[0], lst[1:])
if use_it[0]:
return(True, [lst[0]] + use_it[1])
return subset_with_values(target, lst[1:])
print(subset_with_values(8, [7,2,2,2,2]))
print(subset_with_values(12, [1,2,4,9]))
"""
def LCSWithValues2(S1,S2):
if S1 == "" or S2 == "":
return (0, "")
if S1[0] == S2[0]:
result = result + S1[0]
return (1 + LCSWithValues2(S1[1:], S2[1:]), result)
useS1 = LCSWithValues2(S1, S2[1:])
useS2 = LCSWithValues2(S1[1:], S2)
if useS1[0] > useS2[0]:
return useS1
return useS2
print(LCSWithValues2("sam", "spam"))
"""
def LCSWithValues(S1,S2):
"""returns the longest common string"""
if S1 == "" or S2 == "":
return (0, "")
if S1[0] == S2[0]:
result = LCSWithValues(S1[1:], S2[1:])
return (1 + result[0], S1[0] + result[1])
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if useS1[0] > useS2[0]:
return useS1
return useS2
print(LCSWithValues("sam", "spam"))
#^^^the LCSWithValues2 does not work because the result variable is used before it is defined, and defining it inside the function would reset it to empty on every recursive call.
def coin_row(lst):
#one line:
return 0 if lst == [] else max(lst[0] + coin_row(lst[2:]), coin_row(lst[1:]))
"""
if(lst == []):
return 0
return max(lst[0] + coin_row(lst[2:]), coin_row(lst[1:]))
"""
"""
if(lst == []):
return 0
use_it = lst[0] + coin_row(lst[2:])
lose_it = coin_row(lst[1:])
return max(use_it, lose_it)
This is how you set up each function^^^
and then you can make it nicer
"""
"""
if(coin_row(lst[1:])>lst[0]):
amount = coin_row(lst[1:])
return max(coin_row(lst[2:]), coin_row(lst[2:]))
"""
def coin_row_with_values(lst):
if lst == []:
return [0, []]
use_it = coin_row_with_values(lst[2:])
new_sum = lst[0] + use_it[0]
#that's the result^
lose_it = coin_row_with_values(lst[1:])
if new_sum > lose_it[0]:
#only returns this once I think
#nevermind!
#print('hello')
return [new_sum, [lst[0]] + use_it[1]]
return lose_it
print(coin_row([10, 5, 5, 5, 10, 10, 1, 1]))
print(coin_row_with_values([10, 5, 5, 5, 10, 50, 1, 10, 1, 1, 25]))
#can use below as spell-checker
def distance(first, second):
if first == '':
return len(second)
if second == '':
return len(first)
if first[0] == second[0]:
return distance(first[1:], second[1:])
substitution = 1 + distance(first[1:], second[1:])
deletion = 1 + distance(first[1:], second)
insertion = 1 + distance(first, second[1:])
return min(substitution, deletion, insertion)
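# Illustrative spell-checker sketch (not in the original file): rank candidate words by
# edit distance to a misspelled word; the candidate with the smallest distance wins.
def best_correction(word, candidates):
    """returns the candidate with the smallest edit distance to word"""
    return min(candidates, key=lambda c: distance(word, c))
print(distance("kitten", "sitting"))                                  # 3
print(best_correction("speling", ["spelling", "spline", "spacing"]))  # spelling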
| 31.635714 | 131 | 0.604651 | [
"MIT"
] | jschmidtnj/CS115 | use_it_or_lose_it.py | 4,429 | Python |
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with tf.train.SequenceExamples."""
import math
import tensorflow.compat.v1 as tf
QUEUE_CAPACITY = 500
SHUFFLE_MIN_AFTER_DEQUEUE = QUEUE_CAPACITY // 5
def _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):
"""Shuffles tensors in `input_tensors`, maintaining grouping."""
shuffle_queue = tf.RandomShuffleQueue(
capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])
enqueue_op = shuffle_queue.enqueue(input_tensors)
runner = tf.train.QueueRunner(shuffle_queue, [enqueue_op] * num_threads)
tf.train.add_queue_runner(runner)
output_tensors = shuffle_queue.dequeue()
for i in range(len(input_tensors)):
output_tensors[i].set_shape(input_tensors[i].shape)
return output_tensors
def get_padded_batch(file_list, batch_size, input_size, label_shape=None,
num_enqueuing_threads=4, shuffle=False):
"""Reads batches of SequenceExamples from TFRecords and pads them.
Can deal with variable length SequenceExamples by padding each batch to the
length of the longest sequence with zeros.
Args:
file_list: A list of paths to TFRecord files containing SequenceExamples.
batch_size: The number of SequenceExamples to include in each batch.
input_size: The size of each input vector. The returned batch of inputs
will have a shape [batch_size, num_steps, input_size].
label_shape: Shape for labels. If not specified, will use [].
num_enqueuing_threads: The number of threads to use for enqueuing
SequenceExamples.
shuffle: Whether to shuffle the batches.
Returns:
inputs: A tensor of shape [batch_size, num_steps, input_size] of floats32s.
labels: A tensor of shape [batch_size, num_steps] of int64s.
lengths: A tensor of shape [batch_size] of int32s. The lengths of each
SequenceExample before padding.
Raises:
ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.
"""
file_queue = tf.train.string_input_producer(file_list)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(file_queue)
sequence_features = {
'inputs': tf.FixedLenSequenceFeature(shape=[input_size],
dtype=tf.float32),
'labels': tf.FixedLenSequenceFeature(shape=label_shape or [],
dtype=tf.int64)}
_, sequence = tf.parse_single_sequence_example(
serialized_example, sequence_features=sequence_features)
length = tf.shape(sequence['inputs'])[0]
input_tensors = [sequence['inputs'], sequence['labels'], length]
if shuffle:
if num_enqueuing_threads < 2:
raise ValueError(
'`num_enqueuing_threads` must be at least 2 when shuffling.')
shuffle_threads = int(math.ceil(num_enqueuing_threads) / 2.)
# Since there may be fewer records than SHUFFLE_MIN_AFTER_DEQUEUE, take the
# minimum of that number and the number of records.
min_after_dequeue = count_records(
file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)
input_tensors = _shuffle_inputs(
input_tensors, capacity=QUEUE_CAPACITY,
min_after_dequeue=min_after_dequeue,
num_threads=shuffle_threads)
num_enqueuing_threads -= shuffle_threads
tf.logging.info(input_tensors)
return tf.train.batch(
input_tensors,
batch_size=batch_size,
capacity=QUEUE_CAPACITY,
num_threads=num_enqueuing_threads,
dynamic_pad=True,
allow_smaller_final_batch=False)
def count_records(file_list, stop_at=None):
"""Counts number of records in files from `file_list` up to `stop_at`.
Args:
file_list: List of TFRecord files to count records in.
stop_at: Optional number of records to stop counting at.
Returns:
Integer number of records in files from `file_list` up to `stop_at`.
"""
num_records = 0
for tfrecord_file in file_list:
tf.logging.info('Counting records in %s.', tfrecord_file)
for _ in tf.python_io.tf_record_iterator(tfrecord_file):
num_records += 1
if stop_at and num_records >= stop_at:
tf.logging.info('Number of records is at least %d.', num_records)
return num_records
tf.logging.info('Total records: %d', num_records)
return num_records
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
"""Flattens the batch of sequences, removing padding (if applicable).
Args:
maybe_padded_sequences: A tensor of possibly padded sequences to flatten,
sized `[N, M, ...]` where M = max(lengths).
lengths: Optional length of each sequence, sized `[N]`. If None, assumes no
padding.
Returns:
flatten_maybe_padded_sequences: The flattened sequence tensor, sized
`[sum(lengths), ...]`.
"""
def flatten_unpadded_sequences():
# The sequences are equal length, so we should just flatten over the first
# two dimensions.
return tf.reshape(maybe_padded_sequences,
[-1] + maybe_padded_sequences.shape.as_list()[2:])
if lengths is None:
return flatten_unpadded_sequences()
def flatten_padded_sequences():
indices = tf.where(tf.sequence_mask(lengths))
return tf.gather_nd(maybe_padded_sequences, indices)
return tf.cond(
tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]),
flatten_unpadded_sequences,
flatten_padded_sequences)
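# Illustrative sketch (not part of the original module): flattening a padded batch of two
# sequences whose true lengths are 3 and 2 turns a [2, 3, 4] tensor into a [5, 4] tensor.
# Evaluating the returned tensor requires eager execution or a session, depending on how
# TensorFlow is being run.
def _example_flatten_maybe_padded_sequences():
  padded = tf.zeros([2, 3, 4])
  lengths = tf.constant([3, 2])
  return flatten_maybe_padded_sequences(padded, lengths)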
| 37.223602 | 80 | 0.724345 | [
"Apache-2.0"
] | KenniVelez/magenta | magenta/common/sequence_example_lib.py | 5,993 | Python |
from dataclasses import dataclass
from bindings.csw.query_type import QueryType
__NAMESPACE__ = "http://www.opengis.net/cat/csw/2.0.2"
@dataclass
class Query(QueryType):
class Meta:
namespace = "http://www.opengis.net/cat/csw/2.0.2"
| 22.545455 | 58 | 0.729839 | [
"Apache-2.0"
] | NIVANorge/s-enda-playground | catalog/bindings/csw/query.py | 248 | Python |
"""
gaeenv
~~~~~~~
Google App Engine Virtual Environment builder.
"""
import os
from setuptools import setup, find_packages
from gaeenv.main import gaeenv_version
def read_file(file_name):
return open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
file_name
)
).read()
ldesc = read_file('README')
ldesc += "\n\n" + read_file('CHANGES')
setup(
name='gaeenv',
version=gaeenv_version,
url='https://github.com/llinder/gaeenv',
license='Apache 2.0',
author='Lance Linder',
author_email='[email protected]',
description="Goole App Engine Virtualenv tools",
long_description=ldesc,
packages = find_packages(exclude="test"),
install_requires = ['requests>=2.2.0'],
entry_points={
'console_scripts': ['gaeenv = gaeenv.main:main']
},
zip_safe=False,
platforms='any',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 25.829787 | 70 | 0.633443 | [
"Apache-2.0"
] | signalpillar/gaeenv | setup.py | 1,214 | Python |
# based on: https://github.com/ShiqiYu/libfacedetection.train/blob/74f3aa77c63234dd954d21286e9a60703b8d0868/tasks/task1/yufacedetectnet.py # noqa
import math
from enum import Enum
from typing import Callable, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.geometry.bbox import nms as nms_kornia
__all__ = [
"FaceDetector",
"FaceDetectorResult",
"FaceKeypoint",
]
url: str = "https://github.com/ShiqiYu/libfacedetection.train/raw/master/tasks/task1/weights/yunet_final.pth"
class FaceKeypoint(Enum):
r"""Define the keypoints detected in a face.
The left/right convention is based on the screen viewer.
"""
EYE_LEFT = 0
EYE_RIGHT = 1
NOSE = 2
MOUTH_LEFT = 3
MOUTH_RIGHT = 4
class FaceDetectorResult:
r"""Encapsulate the results obtained by the :py:class:`kornia.contrib.FaceDetector`.
Args:
        data: the encoded results coming from the feature detector with shape :math:`(15,)`.
"""
def __init__(self, data: torch.Tensor) -> None:
if len(data) < 15:
raise ValueError(f"Result must comes as vector of size(14). Got: {data.shape}.")
self._data = data
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> "FaceDetectorResult":
"""Like :func:`torch.nn.Module.to()` method."""
self._data = self._data.to(device=device, dtype=dtype)
return self
@property
def xmin(self) -> torch.Tensor:
"""The bounding box top-left x-coordinate."""
return self._data[..., 0]
@property
def ymin(self) -> torch.Tensor:
"""The bounding box top-left y-coordinate."""
return self._data[..., 1]
@property
def xmax(self) -> torch.Tensor:
"""The bounding box bottom-right x-coordinate."""
return self._data[..., 2]
@property
def ymax(self) -> torch.Tensor:
"""The bounding box bottom-right y-coordinate."""
return self._data[..., 3]
def get_keypoint(self, keypoint: FaceKeypoint) -> torch.Tensor:
"""The [x y] position of a given facial keypoint.
Args:
keypoint: the keypoint type to return the position.
"""
if keypoint == FaceKeypoint.EYE_LEFT:
out = self._data[..., (4, 5)]
elif keypoint == FaceKeypoint.EYE_RIGHT:
out = self._data[..., (6, 7)]
elif keypoint == FaceKeypoint.NOSE:
out = self._data[..., (8, 9)]
elif keypoint == FaceKeypoint.MOUTH_LEFT:
out = self._data[..., (10, 11)]
elif keypoint == FaceKeypoint.MOUTH_RIGHT:
out = self._data[..., (12, 13)]
else:
raise ValueError(f"Not valid keypoint type. Got: {keypoint}.")
return out
@property
def score(self) -> torch.Tensor:
"""The detection score."""
return self._data[..., 14]
@property
def width(self) -> torch.Tensor:
"""The bounding box width."""
return self.xmax - self.xmin
@property
def height(self) -> torch.Tensor:
"""The bounding box height."""
return self.ymax - self.ymin
@property
def top_left(self) -> torch.Tensor:
"""The [x y] position of the top-left coordinate of the bounding box."""
return self._data[..., (0, 1)]
@property
def top_right(self) -> torch.Tensor:
"""The [x y] position of the top-left coordinate of the bounding box."""
out = self.top_left
out[..., 0] += self.width
return out
@property
def bottom_right(self) -> torch.Tensor:
"""The [x y] position of the bottom-right coordinate of the bounding box."""
return self._data[..., (2, 3)]
@property
def bottom_left(self) -> torch.Tensor:
"""The [x y] position of the top-left coordinate of the bounding box."""
out = self.top_left
out[..., 1] += self.height
return out
class FaceDetector(nn.Module):
r"""Detect faces in a given image using a CNN.
By default, it uses the method described in :cite:`facedetect-yu`.
Args:
top_k: the maximum number of detections to return before the nms.
confidence_threshold: the threshold used to discard detections.
nms_threshold: the threshold used by the nms for iou.
keep_top_k: the maximum number of detections to return after the nms.
Return:
A tensor of shape :math:`(N,15)` to be used with :py:class:`kornia.contrib.FaceDetectorResult`.
Example:
>>> img = torch.rand(1, 3, 320, 320)
>>> detect = FaceDetector()
>>> res = detect(img)
"""
def __init__(self,
top_k: int = 5000,
confidence_threshold: float = 0.3,
nms_threshold: float = 0.3,
keep_top_k: int = 750) -> None:
super().__init__()
self.top_k = top_k
self.confidence_threshold = confidence_threshold
self.nms_threshold = nms_threshold
self.keep_top_k = keep_top_k
self.config = {
'name': 'YuFaceDetectNet',
'min_sizes': [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]],
'steps': [8, 16, 32, 64],
'variance': [0.1, 0.2],
'clip': False,
}
self.min_sizes: List[List[int]] = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
self.steps: List[int] = [8, 16, 32, 64]
self.variance: List[float] = [0.1, 0.2]
self.clip: bool = False
self.model = YuFaceDetectNet('test', pretrained=True)
self.nms: Callable = nms_kornia
def preprocess(self, image: torch.Tensor) -> torch.Tensor:
return image
def postprocess(self, data: Dict[str, torch.Tensor], height: int, width: int) -> torch.Tensor:
loc, conf, iou = data['loc'], data['conf'], data['iou']
scale = torch.tensor([
width, height, width, height,
width, height, width, height,
width, height, width, height,
width, height,
], device=loc.device, dtype=loc.dtype) # 14
priors = _PriorBox(self.min_sizes, self.steps, self.clip, image_size=(height, width))
priors = priors.to(loc.device, loc.dtype)
boxes = _decode(loc, priors(), self.variance) # Nx14
boxes = boxes * scale
# clamp here for the compatibility for ONNX
cls_scores, iou_scores = conf[:, 1], iou[:, 0]
scores = (cls_scores * iou_scores.clamp(0., 1.)).sqrt()
# ignore low scores
inds = (scores > self.confidence_threshold)
boxes, scores = boxes[inds], scores[inds]
# keep top-K before NMS
order = scores.sort(descending=True)[1][:self.top_k]
boxes, scores = boxes[order], scores[order]
        # perform NMS
        # NOTE: nms needs to be revised since it does not export well to onnx
dets = torch.cat((boxes, scores[:, None]), dim=-1) # Nx15
keep = self.nms(boxes[:, :4], scores, self.nms_threshold)
if len(keep) > 0:
dets = dets[keep, :]
        # keep top-K after NMS
return dets[:self.keep_top_k]
def forward(self, image: torch.Tensor) -> torch.Tensor:
img = self.preprocess(image)
out = self.model(img)
return self.postprocess(out, img.shape[-2], img.shape[-1])
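# Illustrative usage sketch (not part of the original module): run the detector on a random
# image and wrap each raw (15,) detection row in a FaceDetectorResult. Note that building
# FaceDetector downloads the pretrained weights via torch.hub on first use.
def _example_face_detection() -> None:
    img = torch.rand(1, 3, 320, 320)
    detector = FaceDetector()
    dets = detector(img)  # (N, 15): box corners, 5 keypoints and a score per detection
    for row in dets:
        res = FaceDetectorResult(row)
        print(res.top_left, res.bottom_right, res.score)
        print(res.get_keypoint(FaceKeypoint.NOSE))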
# utils for the network
class ConvDPUnit(nn.Sequential):
def __init__(self, in_channels, out_channels, withBNRelu=True):
super().__init__()
self.add_module("conv1", nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=True, groups=1))
self.add_module("conv2", nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=True, groups=out_channels))
if withBNRelu:
self.add_module("bn", nn.BatchNorm2d(out_channels))
self.add_module("relu", nn.ReLU(inplace=True))
class Conv_head(nn.Sequential):
def __init__(self, in_channels: int, mid_channels: int, out_channels: int) -> None:
super().__init__()
self.add_module("conv1", nn.Conv2d(in_channels, mid_channels, 3, 2, 1, bias=True, groups=1))
self.add_module("bn1", nn.BatchNorm2d(mid_channels))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module("conv2", ConvDPUnit(mid_channels, out_channels))
class Conv4layerBlock(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int, withBNRelu: bool = True) -> None:
super().__init__()
self.add_module("conv1", ConvDPUnit(in_channels, in_channels, True))
self.add_module("conv2", ConvDPUnit(in_channels, out_channels, withBNRelu))
class YuFaceDetectNet(nn.Module):
def __init__(self, phase, pretrained: bool):
super().__init__()
self.phase = phase
self.num_classes = 2
self.model0 = Conv_head(3, 16, 16)
self.model1 = Conv4layerBlock(16, 64)
self.model2 = Conv4layerBlock(64, 64)
self.model3 = Conv4layerBlock(64, 64)
self.model4 = Conv4layerBlock(64, 64)
self.model5 = Conv4layerBlock(64, 64)
self.model6 = Conv4layerBlock(64, 64)
self.head = nn.Sequential(
Conv4layerBlock(64, 3 * (14 + 2 + 1), False),
Conv4layerBlock(64, 2 * (14 + 2 + 1), False),
Conv4layerBlock(64, 2 * (14 + 2 + 1), False),
Conv4layerBlock(64, 3 * (14 + 2 + 1), False),
)
if self.phase == 'train':
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.xavier_normal_(m.weight.data)
m.bias.data.fill_(0.02)
else:
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# use torch.hub to load pretrained model
if pretrained:
pretrained_dict = torch.hub.load_state_dict_from_url(
url, map_location=lambda storage, loc: storage
)
self.load_state_dict(pretrained_dict, strict=True)
self.eval()
def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
detection_sources, head_list = [], []
x = self.model0(x)
x = F.max_pool2d(x, 2)
x = self.model1(x)
x = self.model2(x)
x = F.max_pool2d(x, 2)
x = self.model3(x)
detection_sources.append(x)
x = F.max_pool2d(x, 2)
x = self.model4(x)
detection_sources.append(x)
x = F.max_pool2d(x, 2)
x = self.model5(x)
detection_sources.append(x)
x = F.max_pool2d(x, 2)
x = self.model6(x)
detection_sources.append(x)
for i, h in enumerate(self.head):
x_tmp = h(detection_sources[i])
head_list.append(x_tmp.permute(0, 2, 3, 1).contiguous())
head_data = torch.cat([o.view(o.size(0), -1) for o in head_list], 1)
head_data = head_data.view(head_data.size(0), -1, 17)
loc_data, conf_data, iou_data = head_data.split((14, 2, 1), dim=-1)
if self.phase == "test":
loc_data = loc_data.view(-1, 14)
conf_data = torch.softmax(conf_data.view(-1, self.num_classes), dim=-1)
iou_data = iou_data.view(-1, 1)
else:
loc_data = loc_data.view(loc_data.size(0), -1, 14)
conf_data = conf_data.view(conf_data.size(0), -1, self.num_classes)
iou_data = iou_data.view(iou_data.size(0), -1, 1)
return {"loc": loc_data, "conf": conf_data, "iou": iou_data}
# utils for post-processing
# Adapted from https://github.com/Hakuyume/chainer-ssd
def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:
"""Decode locations from predictions using priors to undo the encoding we did for offset regression at train
time.
Args:
        loc: location predictions for loc layers. Shape: [num_priors, 14].
priors: Prior boxes in center-offset form. Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes.
Return:
Tensor containing decoded bounding box predictions.
"""
boxes = torch.cat((
priors[:, 0:2] + loc[:, 0:2] * variances[0] * priors[:, 2:4],
priors[:, 2:4] * torch.exp(loc[:, 2:4] * variances[1]),
priors[:, 0:2] + loc[:, 4:6] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 6:8] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 8:10] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 10:12] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 12:14] * variances[0] * priors[:, 2:4]), 1)
# prepare final output
tmp = boxes[:, 0:2] - boxes[:, 2:4] / 2
return torch.cat((tmp, boxes[:, 2:4] + tmp, boxes[:, 4:]), dim=-1)
class _PriorBox:
def __init__(self, min_sizes: List[List[int]], steps: List[int], clip: bool, image_size: Tuple[int, int]) -> None:
self.min_sizes = min_sizes
self.steps = steps
self.clip = clip
self.image_size = image_size
self.device: torch.device = torch.device('cpu')
self.dtype: torch.dtype = torch.float32
for i in range(4):
if(self.steps[i] != math.pow(2, (i + 3))):
raise ValueError("steps must be [8,16,32,64]")
self.feature_map_2th = [int(int((self.image_size[0] + 1) / 2) / 2),
int(int((self.image_size[1] + 1) / 2) / 2)]
self.feature_map_3th = [int(self.feature_map_2th[0] / 2),
int(self.feature_map_2th[1] / 2)]
self.feature_map_4th = [int(self.feature_map_3th[0] / 2),
int(self.feature_map_3th[1] / 2)]
self.feature_map_5th = [int(self.feature_map_4th[0] / 2),
int(self.feature_map_4th[1] / 2)]
self.feature_map_6th = [int(self.feature_map_5th[0] / 2),
int(self.feature_map_5th[1] / 2)]
self.feature_maps = [self.feature_map_3th, self.feature_map_4th,
self.feature_map_5th, self.feature_map_6th]
def to(self, device: torch.device, dtype: torch.dtype) -> '_PriorBox':
self.device = device
self.dtype = dtype
return self
def __call__(self) -> torch.Tensor:
anchors: List[float] = []
for k, f in enumerate(self.feature_maps):
min_sizes: List[int] = self.min_sizes[k]
            # NOTE: the nested loops are here to make torchscript happy
for i in range(f[0]):
for j in range(f[1]):
for min_size in min_sizes:
s_kx = min_size / self.image_size[1]
s_ky = min_size / self.image_size[0]
cx = (j + 0.5) * self.steps[k] / self.image_size[1]
cy = (i + 0.5) * self.steps[k] / self.image_size[0]
anchors += [cx, cy, s_kx, s_ky]
# back to torch land
output = torch.tensor(anchors, device=self.device, dtype=self.dtype).view(-1, 4)
if self.clip:
output = output.clamp(max=1, min=0)
return output
| 36.995181 | 146 | 0.579887 | [
"ECL-2.0",
"Apache-2.0"
] | Abdelrhman-Hosny/kornia | kornia/contrib/face_detection.py | 15,353 | Python |
"""
Module Doc String
"""
EMOTIONS = [
"sentimental",
"afraid",
"proud",
"faithful",
"terrified",
"joyful",
"angry",
"sad",
"jealous",
"grateful",
"prepared",
"embarrassed",
"excited",
"annoyed",
"lonely",
"ashamed",
"guilty",
"surprised",
"nostalgic",
"confident",
"furious",
"disappointed",
"caring",
"trusting",
"disgusted",
"anticipating",
"anxious",
"hopeful",
"content",
"impressed",
"apprehensive",
"devastated",
]
def main():
""" Driver """
if __name__ == "__main__":
main()
| 12.77551 | 26 | 0.496805 | [
"MIT"
] | ajyl/KEMP | common.py | 626 | Python |
from django.utils import translation
from django.utils.translation.trans_real import (
to_language as django_to_language,
parse_accept_lang_header as django_parse_accept_lang_header
)
from django.test import RequestFactory, TestCase
from django.urls import reverse
from .. import language_code_to_iso_3166, parse_accept_lang_header, to_language
from ..utils import queue_ga_event
class UtilsTestCase(TestCase):
def test_get_language_code_to_iso_3166(self):
self.assertEqual(language_code_to_iso_3166('en-gb'), 'en-GB')
self.assertEqual(language_code_to_iso_3166('en-us'), 'en-US')
self.assertEqual(language_code_to_iso_3166('fr'), 'fr')
def test_to_language(self):
self.assertEqual(to_language('en_US'), 'en-US')
def test_parse_accept_lang_header_returns_iso_3166_language(self):
self.assertEqual(
parse_accept_lang_header('en-GB,en;q=0.5'),
(('en-GB', 1.0), ('en', 0.5)),
)
def test_queue_ga_event_new(self):
request = RequestFactory().get('/')
request.session = self.client.session
queue_ga_event(request, ['send', 'event', 'foo'])
self.assertEqual(request.session['ga_events'], [['send', 'event', 'foo']])
def test_queue_ga_event_append(self):
request = RequestFactory().get('/')
request.session = self.client.session
request.session['ga_events'] = [['send', 'event', 'foo']]
queue_ga_event(request, ['send', 'event', 'bar'])
self.assertEqual(request.session['ga_events'], [['send', 'event', 'foo'], ['send', 'event', 'bar']])
self.assertTrue(request.session.modified)
class UtilsIntegrationTestCase(TestCase):
"""
Test that our overrides to Django translation functions work.
"""
def test_to_language(self):
self.assertEqual(django_to_language('en_US'), 'en-US')
def test_parse_accept_lang_header_returns_iso_3166_language(self):
self.assertEqual(
django_parse_accept_lang_header('en-GB,en;q=0.5'),
(('en-GB', 1.0), ('en', 0.5)),
)
def test_reverse_produces_correct_url_prefix(self):
translation.activate('en-GB')
url = reverse('payments:completed')
self.assertTrue(url.startswith('/en-GB/'))
translation.deactivate()
| 36.809524 | 108 | 0.675722 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | 10allday-Software/donate-wagtail | donate/core/tests/test_utils.py | 2,319 | Python |
# global
import ivy
import abc
import importlib
from typing import List
# local
from ivy_builder.specs.spec import Spec
from ivy_builder.specs import DatasetSpec
from ivy_builder.specs.spec import locals_to_kwargs
# ToDo: fix cyclic imports, so this method can be imported from the builder module
def load_class_from_str(full_str):
mod_str = '.'.join(full_str.split('.')[:-1])
class_str = full_str.split('.')[-1]
return getattr(importlib.import_module(mod_str), class_str)
class NetworkSpec(Spec, abc.ABC):
def __init__(self, dataset_spec: DatasetSpec = None, dev_strs: List[str] = None,
v_keychains=None, keep_v_keychains=False, build_mode='explicit', **kwargs) -> None:
"""
base class for storing general specifications of the neural network
"""
kw = locals_to_kwargs(locals())
super().__init__(dataset_spec=dataset_spec,
dev_strs=dev_strs,
v_keychains=v_keychains,
keep_v_keychains=keep_v_keychains,
build_mode=build_mode,
**kwargs)
if 'subnets' in self:
for k, subet_spec in self.subnets.items():
if 'network_spec_class' in subet_spec:
if isinstance(subet_spec.network_spec_class, str):
spec_class = load_class_from_str(subet_spec.network_spec_class)
else:
spec_class = subet_spec.network_spec_class
if isinstance(kwargs['subnets'][k], spec_class):
subet_spec = kwargs['subnets'][k]
else:
subet_spec = spec_class(**{**kwargs['subnets'][k],
**dict(dataset_spec=dataset_spec, dev_strs=dev_strs)})
self.subnets[k] = subet_spec
if isinstance(subet_spec.network_class, str):
self.subnets[k].network_class = load_class_from_str(subet_spec.network_class)
else:
self.subnets[k].network_class = subet_spec.network_class
self.subnets[k].store_vars = ivy.default(self.subnets[k].if_exists('store_vars'), True)
self.subnets[k].build_mode = ivy.default(self.subnets[k].if_exists('build_mode'), self.build_mode)
self.subnets[k].dataset_spec = dataset_spec
self.subnets[k].dev_strs = dev_strs
self._kwargs = kw
| 45.196429 | 114 | 0.594232 | [
"Apache-2.0"
] | ivy-dl/builder | ivy_builder/specs/network_spec.py | 2,531 | Python |
# coding: utf-8
import pprint
import re
import six
class SetBackupPolicyRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'backup_policy': 'BackupPolicy'
}
attribute_map = {
'backup_policy': 'backup_policy'
}
def __init__(self, backup_policy=None):
"""SetBackupPolicyRequestBody - a model defined in huaweicloud sdk"""
self._backup_policy = None
self.discriminator = None
self.backup_policy = backup_policy
@property
def backup_policy(self):
"""Gets the backup_policy of this SetBackupPolicyRequestBody.
:return: The backup_policy of this SetBackupPolicyRequestBody.
:rtype: BackupPolicy
"""
return self._backup_policy
@backup_policy.setter
def backup_policy(self, backup_policy):
"""Sets the backup_policy of this SetBackupPolicyRequestBody.
:param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.
:type: BackupPolicy
"""
self._backup_policy = backup_policy
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SetBackupPolicyRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.481481 | 83 | 0.562937 | [
"Apache-2.0"
] | JeffreyDin/huaweicloud-sdk-python-v3 | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py | 2,860 | Python |
# -*- coding: utf-8 -*-
"""
Rewrite ot.bregman.sinkhorn in Python Optimal Transport (https://pythonot.github.io/_modules/ot/bregman.html#sinkhorn)
using pytorch operations.
Bregman projections for regularized OT (Sinkhorn distance).
"""
import torch
M_EPS = 1e-16
def sinkhorn(a, b, C, reg=1e-1, method='sinkhorn', maxIter=1000, tau=1e3,
stopThr=1e-9, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
"""
Solve the entropic regularization optimal transport
The input should be PyTorch tensors
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
method : str
        method used for the solver, either 'sinkhorn', 'sinkhorn_stabilized' or
        'sinkhorn_epsilon_scaling'; see those functions for specific parameters
maxIter : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
"""
if method.lower() == 'sinkhorn':
return sinkhorn_knopp(a, b, C, reg, maxIter=maxIter,
stopThr=stopThr, verbose=verbose, log=log,
warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
**kwargs)
elif method.lower() == 'sinkhorn_stabilized':
return sinkhorn_stabilized(a, b, C, reg, maxIter=maxIter, tau=tau,
stopThr=stopThr, verbose=verbose, log=log,
warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
**kwargs)
elif method.lower() == 'sinkhorn_epsilon_scaling':
return sinkhorn_epsilon_scaling(a, b, C, reg,
maxIter=maxIter, maxInnerIter=100, tau=tau,
scaling_base=0.75, scaling_coef=None, stopThr=stopThr,
verbose=False, log=log, warm_start=warm_start, eval_freq=eval_freq,
print_freq=print_freq, **kwargs)
else:
raise ValueError("Unknown method '%s'." % method)
def sinkhorn_knopp(a, b, C, reg=1e-1, maxIter=1000, stopThr=1e-9,
verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
"""
Solve the entropic regularization optimal transport
The input should be PyTorch tensors
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
maxIter : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
"""
device = a.device
na, nb = C.shape
assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b doesn't match that of C"
assert reg > 0, 'reg should be greater than 0'
assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'
# unnecessary check for our special case
if log:
log = {'err': []}
if warm_start is not None:
u = warm_start['u']
v = warm_start['v']
else:
u = torch.ones(na, dtype=a.dtype).to(device) / na
v = torch.ones(nb, dtype=b.dtype).to(device) / nb
K = torch.empty(C.shape, dtype=C.dtype).to(device)
torch.div(C, -reg, out=K)
torch.exp(K, out=K)
b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)
it = 1
err = 1
# allocate memory beforehand
KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
while (err > stopThr and it <= maxIter):
upre, vpre = u, v
torch.matmul(u, K, out=KTu)
v = torch.div(b, KTu + M_EPS)
torch.matmul(K, v, out=Kv)
u = torch.div(a, Kv + M_EPS)
if torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)) or \
torch.any(torch.isinf(u)) or torch.any(torch.isinf(v)):
print('Warning: numerical errors at iteration', it)
u, v = upre, vpre
break
if log and it % eval_freq == 0:
# we can speed up the process by checking for the error only all
# the eval_freq iterations
# below is equivalent to:
# b_hat = torch.sum(u.reshape(-1, 1) * K * v.reshape(1, -1), 0)
# but with more memory efficient
b_hat = torch.matmul(u, K) * v
err = (b - b_hat).pow(2).sum().item()
# err = (b - b_hat).abs().sum().item()
log['err'].append(err)
if verbose and it % print_freq == 0:
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['u'] = u
log['v'] = v
log['alpha'] = reg * torch.log(u + M_EPS)
log['beta'] = reg * torch.log(v + M_EPS)
# transport plan
P = u.reshape(-1, 1) * K * v.reshape(1, -1)
if log:
return P, log
else:
return P
def sinkhorn_stabilized(a, b, C, reg=1e-1, maxIter=1000, tau=1e3, stopThr=1e-9,
verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
"""
Solve the entropic regularization OT problem with log stabilization
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]
    but with the log stabilization proposed in [3] and defined in [2] (Algo 3.1)
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
tau : float
        threshold for max value in u or v for log scaling
maxIter : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
        log dictionary returned only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
[2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
[3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
"""
device = a.device
na, nb = C.shape
assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b doesn't match that of C"
assert reg > 0, 'reg should be greater than 0'
assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'
if log:
log = {'err': []}
if warm_start is not None:
alpha = warm_start['alpha']
beta = warm_start['beta']
else:
alpha = torch.zeros(na, dtype=a.dtype).to(device)
beta = torch.zeros(nb, dtype=b.dtype).to(device)
u = torch.ones(na, dtype=a.dtype).to(device) / na
v = torch.ones(nb, dtype=b.dtype).to(device) / nb
def update_K(alpha, beta):
"""log space computation"""
"""memory efficient"""
torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=K)
torch.add(K, -C, out=K)
torch.div(K, reg, out=K)
torch.exp(K, out=K)
def update_P(alpha, beta, u, v, ab_updated=False):
"""log space P (gamma) computation"""
torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=P)
torch.add(P, -C, out=P)
torch.div(P, reg, out=P)
if not ab_updated:
torch.add(P, torch.log(u + M_EPS).reshape(-1, 1), out=P)
torch.add(P, torch.log(v + M_EPS).reshape(1, -1), out=P)
torch.exp(P, out=P)
K = torch.empty(C.shape, dtype=C.dtype).to(device)
update_K(alpha, beta)
b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)
it = 1
err = 1
ab_updated = False
# allocate memory beforehand
KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
P = torch.empty(C.shape, dtype=C.dtype).to(device)
while (err > stopThr and it <= maxIter):
upre, vpre = u, v
torch.matmul(u, K, out=KTu)
v = torch.div(b, KTu + M_EPS)
torch.matmul(K, v, out=Kv)
u = torch.div(a, Kv + M_EPS)
ab_updated = False
# remove numerical problems and store them in K
if u.abs().sum() > tau or v.abs().sum() > tau:
alpha += reg * torch.log(u + M_EPS)
beta += reg * torch.log(v + M_EPS)
u.fill_(1. / na)
v.fill_(1. / nb)
update_K(alpha, beta)
ab_updated = True
if log and it % eval_freq == 0:
            # we can speed up the process by checking for the error only
            # every eval_freq iterations
update_P(alpha, beta, u, v, ab_updated)
b_hat = torch.sum(P, 0)
err = (b - b_hat).pow(2).sum().item()
log['err'].append(err)
if verbose and it % print_freq == 0:
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['u'] = u
log['v'] = v
log['alpha'] = alpha + reg * torch.log(u + M_EPS)
log['beta'] = beta + reg * torch.log(v + M_EPS)
# transport plan
update_P(alpha, beta, u, v, False)
if log:
return P, log
else:
return P
def sinkhorn_epsilon_scaling(a, b, C, reg=1e-1, maxIter=100, maxInnerIter=100, tau=1e3, scaling_base=0.75,
scaling_coef=None, stopThr=1e-9, verbose=False, log=False, warm_start=None, eval_freq=10,
print_freq=200, **kwargs):
"""
Solve the entropic regularization OT problem with log stabilization
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [1] but with the log stabilization
proposed in [3] and the log scaling proposed in [2] algorithm 3.2
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
tau : float
        threshold for max value in u or v for log scaling
maxIter : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
        log dictionary returned only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
[2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
[3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
"""
na, nb = C.shape
assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b doesn't match that of C"
assert reg > 0, 'reg should be greater than 0'
assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'
def get_reg(it, reg, pre_reg):
if it == 1:
return scaling_coef
else:
if (pre_reg - reg) * scaling_base < M_EPS:
return reg
else:
return (pre_reg - reg) * scaling_base + reg
if scaling_coef is None:
scaling_coef = C.max() + reg
it = 1
err = 1
running_reg = scaling_coef
if log:
log = {'err': []}
warm_start = None
while (err > stopThr and it <= maxIter):
running_reg = get_reg(it, reg, running_reg)
P, _log = sinkhorn_stabilized(a, b, C, running_reg, maxIter=maxInnerIter, tau=tau,
stopThr=stopThr, verbose=False, log=True,
warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
**kwargs)
warm_start = {}
warm_start['alpha'] = _log['alpha']
warm_start['beta'] = _log['beta']
primal_val = (C * P).sum() + reg * (P * torch.log(P)).sum() - reg * P.sum()
dual_val = (_log['alpha'] * a).sum() + (_log['beta'] * b).sum() - reg * P.sum()
err = primal_val - dual_val
        if log:
            log['err'].append(err)
if verbose and it % print_freq == 0:
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['alpha'] = _log['alpha']
log['beta'] = _log['beta']
return P, log
else:
return P
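# Illustrative usage sketch (not part of the original module); it assumes torch is
# imported at the top of this file, as the solvers above require. The marginals,
# cost matrix and regularization values below are arbitrary choices for demonstration.
if __name__ == '__main__':
    _na, _nb = 5, 7
    _a = torch.full((_na,), 1. / _na)   # uniform target marginal
    _b = torch.full((_nb,), 1. / _nb)   # uniform source marginal
    _C = torch.rand(_na, _nb)           # random cost matrix
    _P1 = sinkhorn_stabilized(_a, _b, _C, reg=1e-1, maxIter=200)
    _P2 = sinkhorn_epsilon_scaling(_a, _b, _C, reg=1e-1, maxIter=20, maxInnerIter=50)
    # both transport plans should have row sums close to _a and column sums close to _b
    print(_P1.sum(dim=1), _P2.sum(dim=1))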
| 35.175258 | 157 | 0.583118 | [
"MIT"
] | SelmanOzleyen/DRDM-Count | losses/bregman_pytorch.py | 17,062 | Python |
from typing import List
def bubblesort(nums: List[int]):
""" sort list """
for i in range(0, len(nums)):
for j in range(0, len(nums) - i - 1):
            if nums[j] > nums[j + 1]:
                # swap adjacent out-of-order elements
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
return nums
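# Illustrative usage sketch (not part of the original module): bubblesort sorts the
# list in place and also returns it.
if __name__ == "__main__":
    print(bubblesort([5, 2, 9, 1, 3]))  # -> [1, 2, 3, 5, 9]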
| 24.384615 | 45 | 0.451104 | [
"CC0-1.0"
] | vscode-debug-specs/python | bubblesort/bubblesort_logic.py | 317 | Python |
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the
# cli_rm_builder.
#
# Manually editing this file is not advised.
#
# To update the argspec make the desired changes
# in the module docstring and re-run
# cli_rm_builder.
#
#############################################
"""
The arg spec for the vyos_route_maps module
"""
class Route_mapsArgs(object): # pylint: disable=R0903
"""The arg spec for the vyos_route_maps module"""
def __init__(self, **kwargs):
pass
argument_spec = {
"config": {
"type": "list",
"elements": "dict",
"options": {
"route_map": {"type": "str"},
"entries": {
"aliases": ["rules"],
"type": "list",
"elements": "dict",
"options": {
"sequence": {"type": "int"},
"call": {"type": "str"},
"description": {"type": "str"},
"action": {
"type": "str",
"choices": ["deny", "permit"],
},
"continue_sequence": {"type": "int"},
"set": {
"type": "dict",
"options": {
"aggregator": {
"type": "dict",
"options": {
"ip": {"type": "str"},
"as": {"type": "str"},
},
},
"as_path_exclude": {"type": "str"},
"as_path_prepend": {"type": "str"},
"atomic_aggregate": {"type": "bool"},
"bgp_extcommunity_rt": {"type": "str"},
"comm_list": {
"type": "dict",
"options": {
"comm_list": {"type": "str"},
"delete": {"type": "bool"},
},
},
"community": {
"type": "dict",
"options": {"value": {"type": "str"}},
},
"extcommunity_rt": {"type": "str"},
"extcommunity_soo": {"type": "str"},
"ip_next_hop": {"type": "str"},
"ipv6_next_hop": {
"type": "dict",
"options": {
"ip_type": {
"type": "str",
"choices": ["global", "local"],
},
"value": {"type": "str"},
},
},
"large_community": {"type": "str"},
"local_preference": {"type": "str"},
"metric": {"type": "str"},
"metric_type": {
"type": "str",
"choices": ["type-1", "type-2"],
},
"origin": {
"type": "str",
"choices": ["egp", "igp", "incomplete"],
},
"originator_id": {"type": "str"},
"src": {"type": "str"},
"tag": {"type": "str"},
"weight": {"type": "str"},
},
},
"match": {
"type": "dict",
"options": {
"as_path": {"type": "str"},
"community": {
"type": "dict",
"options": {
"community_list": {"type": "str"},
"exact_match": {"type": "bool"},
},
},
"extcommunity": {"type": "str"},
"interface": {"type": "str"},
"ip": {
"type": "dict",
"options": {
"address": {
"type": "dict",
"options": {
"list_type": {
"type": "str",
"choices": [
"access-list",
"prefix-list",
],
},
"value": {"type": "str"},
},
},
"next_hop": {
"type": "dict",
"options": {
"list_type": {
"type": "str",
"choices": [
"access-list",
"prefix-list",
],
},
"value": {"type": "str"},
},
},
"route_source": {
"type": "dict",
"options": {
"list_type": {
"type": "str",
"choices": [
"access-list",
"prefix-list",
],
},
"value": {"type": "str"},
},
},
},
},
"ipv6": {
"type": "dict",
"options": {
"address": {
"type": "dict",
"options": {
"list_type": {
"type": "str",
"choices": [
"access-list",
"prefix-list",
],
},
"value": {"type": "str"},
},
},
"next_hop": {"type": "str"},
},
},
"large_community_large_community_list": {
"type": "str"
},
"metric": {"type": "int"},
"origin": {
"type": "str",
"choices": ["ebgp", "ibgp", "incomplete"],
},
"peer": {"type": "str"},
"rpki": {
"type": "str",
"choices": [
"notfound",
"invalid",
"valid",
],
},
},
},
"on_match": {
"type": "dict",
"options": {
"next": {"type": "bool"},
"goto": {"type": "int"},
},
},
},
},
},
},
"running_config": {"type": "str"},
"state": {
"type": "str",
"choices": [
"deleted",
"merged",
"overridden",
"replaced",
"gathered",
"rendered",
"parsed",
],
"default": "merged",
},
} # pylint: disable=C0301
| 45.380952 | 78 | 0.196222 | [
"MIT"
] | elixir-no-nels/usegalaxy | venv/lib/python3.6/site-packages/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/route_maps/route_maps.py | 10,483 | Python |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="rocketpy",
version="0.9.9",
install_requires=["numpy>=1.0", "scipy>=1.0", "matplotlib>=3.0", "requests"],
maintainer="RocketPy Developers",
author="Giovani Hidalgo Ceotto",
author_email="[email protected]",
description="Advanced 6-DOF trajectory simulation for High-Power Rocketry.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/giovaniceotto/RocketPy",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 32.32 | 81 | 0.67203 | [
"MIT"
] | DeepWater1013/RockedPy | setup.py | 808 | Python |
# Database Lib
"""
Oracle
PostgreSQL
SQLite
SQLServer
Hive
Spark
"""
import os, datetime, pandas, time, re
from collections import namedtuple, OrderedDict
import jmespath
import sqlalchemy
from multiprocessing import Queue, Process
from xutil.helpers import (
log,
elog,
slog,
get_exception_message,
struct,
now,
get_databases,
get_dir_path,
get_profile,
get_variables,
file_exists,
str_rmv_indent,
ptable,
make_rec,
get_error_str,
)
from xutil.diskio import read_yaml, write_csvs
conns = {}
_fwklike = lambda k, v: "lower({}) like lower('{}')".format(k, v)
_fwkeq = lambda k, v: "{} = '{}'".format(k, v)
_fw = lambda sep, _fwkop, **kws: sep.join([_fwkop(k, v) for k, v in kws.items()]) # Format WHERE
fwa = lambda _fwkop=_fwkeq, **kws: _fw(' and ', _fwkop, **kws) # Format WHERE AND
fwo = lambda _fwkop=_fwkeq, **kws: _fw(' or ', _fwkop, **kws) # Format WHERE OR
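# e.g. fwa(name='Tom', id=1) -> "name = 'Tom' and id = '1'" (illustrative; values are always quoted)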
rows_to_dicts = lambda rows: [row._asdict() for row in rows]
class DBConn(object):
"""Base class for database connections"""
_fix_f_name = lambda self, f: f
_to_text = lambda self, t: t
def __init__(self, conn_dict, profile=None, echo=False):
"Inititate connection"
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
# Base Template
template_base_path = '{}/database/templates/base.yaml'.format(
get_dir_path())
self.template_dict = read_yaml(template_base_path)
# Specific Type Template
template_path = '{}/database/templates/{}.yaml'.format(
get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
# Level 1
if isinstance(temp_dict[key1], dict):
if key1 not in self.template_dict:
self.template_dict[key1] = temp_dict[key1]
# Level 2
for key2 in temp_dict[key1]:
# Always Overwrite
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
# Level 1 Non-Dict Overwrite
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log("Connected to {} as {}".format(self._cred.name, self._cred.user))
def connect(self):
"""Connect to Database"""
self.engine = self.get_engine()
self.connection = self.engine.connect()
def close(self):
"""Close database connection"""
    self.connection.close()
def reconnect(self, min_tresh=0):
"""Re-Connect to Database if minute threshold reached"""
if (now() - self.last_connect).total_seconds() > min_tresh * 60:
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
def set_variables(self):
"""Set custom variables"""
raise Exception("Method 'set_variables' is not implemented!")
def get_dialect(self, echo=False):
"""SQLAlchemy dialect"""
raise Exception("Method 'get_dialect' is not implemented!")
def get_engine(self, echo=False):
import sqlalchemy
if not self.engine:
self.create_engine(echo=self.echo)
self.engine_inspect = sqlalchemy.inspect(self.engine)
return self.engine
def check_pk(self, table, fields):
"Check Primary key to ensure there are not duplicates"
if 'where' in fields.lower():
fields, where_clause = fields.lower().split('where')
where_clause = 'where ' + where_clause
else:
where_clause = ''
sql = '''
select
'{table}' as table,
case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result
from {table}
{where_clause}
'''.format(
table=table,
fields=fields,
where_clause=where_clause,
)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if data[0].pk_result == 'FAIL':
      raise (Exception('PK test failed for table "{}" with fields "{}"'.format(
table, fields)))
def _do_execute(self, sql):
try:
self._cursor_description = None
self.fields = None
self.result = self.connection.execute(sql)
self._cursor_description = self.result._cursor_description()
self._fields = self._get_cursor_fields()
except Exception as E:
if 'not open' in get_error_str(E):
pass # error when Oracle doesn't have a cursor open
else:
log(Exception('Error for SQL:\n' + sql))
raise E
def execute_multi(self,
sql,
dtype='namedtuple',
limit=None,
echo=True,
query_name='Record',
log=log):
"""
    Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))
"""
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {
'drop ': 'Dropping {}.',
'truncate ': 'Truncating {}.',
'select ': 'Selecting {}.',
'create ': 'Creating {}.',
'insert ': 'Inserting {}.',
'alter ': 'Altering {}.',
'update ': 'Updating {}.',
'delete ': 'Deleting {}.',
'exec ': 'Calling Procedure {}.',
'grant ': 'Granting {}.',
}
sqls = sql.split(';')
for sql in sqls:
if not sql.strip(): continue
sql_ = sql.strip().lower()
for word, message in message_mapping.items():
if sql_.startswith(word):
if echo:
log(
message.format(' '.join(
sql_.splitlines()[0].split()[1:3]).upper()))
break
      # Call procedure with callproc (using a raw connection, as in execute())
      if sql_.startswith('exec '):
        procedure = sql_[5:].split('(')[0]
        args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
        args = [a.strip() for a in args]
        connection = self.engine.raw_connection()
        try:
          cursor = connection.cursor()
          cursor.callproc(procedure, args)
          cursor.close()
          connection.commit()
        finally:
          connection.close()
        continue
try:
self._fields = []
rows = self.query(
sql,
rec_name=query_name,
dtype=dtype,
limit=limit,
echo=echo,
log=log)
fields = self._fields
if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
sql_lines = sql_.splitlines()
regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [
l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
][0]
fields = line.split(':')[-1]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if sql_.startswith(
'drop ') and self.error_msg['table_not_exist'] in message:
log("WARNING: Table already dropped.")
else:
raise E
if not fields: fields = []
yield fields, rows
def execute(self,
sql,
dtype='tuple',
limit=None,
echo=True,
query_name='Record',
log=log):
"""Execute SQL, return last result"""
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {
'drop ': 'Dropping {}.',
'truncate ': 'Truncating {}.',
'select ': 'Selecting {}.',
'create ': 'Creating {}.',
'insert ': 'Inserting {}.',
'alter ': 'Altering {}.',
'update ': 'Updating {}.',
'delete ': 'Deleting {}.',
'exec ': 'Calling Procedure {}.',
'grant ': 'Granting {}.',
}
sql_ = sql.strip().lower()
for word, message in message_mapping.items():
if sql_.startswith(word):
if echo:
log(
message.format(' '.join(
sql_.splitlines()[0].split()[1:3]).upper()))
break
# Call procedure with callproc
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
        return self._fields, rows
finally:
connection.close()
try:
self._fields = []
rows = self.query(
sql,
rec_name=query_name,
dtype=dtype,
limit=limit,
echo=echo,
log=log)
fields = self._fields
if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
sql_lines = sql_.splitlines()
regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [
l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
][0]
fields = line.split(':')[-1]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if sql_.startswith(
'drop ') and self.error_msg['table_not_exist'] in message:
log("WARNING: Table already dropped.")
else:
raise E
if not fields: fields = []
return fields, rows
def insert(self, table, data, echo=False):
"""Insert records of namedtuple or dicts"""
raise Exception('insert not implemented')
def drop_table(self, table, log=log):
"Drop table"
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if self._template('error_filter.table_not_exist') in message:
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
def create_table(self, table, field_types, drop=False, log=log):
"Create table"
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
ftype, max_len, dec_len = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace(
'()', suff)
field_types_str = ', \n'.join([
self._fix_f_name(field) + ' ' + new_ftypes[field] for field in new_ftypes
])
sql = self._template('core.create_table').format(
table=table,
col_types=field_types_str,
)
# log('Creating table: \n' + sql))
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
"Get fields of active Select cursor"
fields = OrderedDict()
cursor_desc = cursor_desc if cursor_desc else self._cursor_description
if cursor_desc == None:
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
        # assign long/double as needed based on precision/scale
if 'cx_Oracle.NUMBER' in str(f[1]):
if f[4] and f[4] > 11: f_type = 'long'
if f[5] and f[5] > 0: f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
def stream(self,
sql,
rec_name='Record',
dtype='namedtuple',
yield_chuncks=False,
chunk_size=None,
limit=None,
echo=True):
"Stream Select from SQL, yield records as they come in"
self.reconnect(min_tresh=10)
if echo: log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = limit if limit else self.fetch_size
fetch_size = chunk_size if chunk_size else fetch_size
try:
self._do_execute(sql)
except Exception as e:
raise e
if dtype == 'tuple':
make_rec = lambda row: row
make_batch = lambda rows: rows
elif dtype == 'dataframe':
yield_chuncks=True
make_batch = lambda rows: pandas.DataFrame(rows, columns=self._fields)
else:
Record = namedtuple(
rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = lambda row: Record(*row)
make_batch = lambda rows: [make_rec(r) for r in rows]
self._stream_counter = 0
while True:
if not self._fields:
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
yield batch
else:
for row in rows:
self._stream_counter += 1
yield make_rec(row)
else:
break
if limit:
break
# log('Stream finished at {} records.'.format(self._stream_counter))
def query(self,
sql,
rec_name='Record',
dtype='namedtuple',
limit=None,
echo=True,
retrying=False,
log=log):
"Select from SQL, return list of namedtuples"
# if echo: log("Running SQL for '{}'.".format(rec_name))
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if not self.result.closed:
self.result.close()
fields = self._fields
if not fields: return []
    if dtype == 'namedtuple':
      Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
      data = [Record(*row) for row in _data]
    elif dtype == 'tuple':
      data = [tuple(row) for row in _data]
    elif dtype == 'dataframe':
      data = pandas.DataFrame([row for row in _data], columns=fields)
    else:
      raise (Exception('{} is not recognized.'.format(dtype)))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round(len(data) / secs, 1)
if echo:
log(" >>> Got {} rows in {} secs [{} r/s].".format(
len(data), secs, rate))
return data
def _split_schema_table(self, table_name):
schema, table = table_name.split('.') if '.' in table_name else (
self.username, table_name)
return schema, table
def _concat_fields(self, fields, as_text=False):
return ' || '.join(fields)
def _template(self, template_key_str):
val = jmespath.search(template_key_str, self.template_dict)
if isinstance(val, str):
val = str_rmv_indent(val)
return val
def get_schemas(self, echo=True):
"Get list of schemas."
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
# http://docs.sqlalchemy.org/en/rel_0_9/core/reflection.html#sqlalchemy.engine.reflection.Inspector.get_schemas
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows
def get_objects(self, schema, object_type='all', echo=True):
"Get metadata for objects. object_type in 'all', 'table', 'view'"
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(
schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if object_type == 'all':
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif object_type == 'table':
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif object_type == 'view':
view_rows = self.get_views(schema)
      rows = [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows
def get_tables(self, schema, echo=True):
"Get metadata for tables."
schemas = schema if isinstance(schema, list) else [schema]
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
# Getting pickle.PicklingError: Can't pickle <class 'xutil.database.base.Table'>
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows
def get_views(self, schema, echo=True):
"Get metadata for views."
schemas = schema if isinstance(schema, list) else [schema]
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
# pickle.PicklingError: Can't pickle <class 'xutil.database.base.View'>
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows
def get_columns(self,
table_name,
object_type=None,
echo=False,
include_schema_table=True,
native_type=True):
"Get column metadata for table"
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = table_name if isinstance(table_name, list) else [table_name]
for table_name in table_names:
schema, table = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if not native_type:
r_dict['type']= r_dict['type'].lower()
r_dict['type'] = r_dict['type'].split('(')[0] if '(' in r_dict[
'type'] else r_dict['type']
native_type_map = self._template('native_type_map')
if not r_dict['type'] in native_type_map:
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if k not in headers.split():
del r_dict[k]
if '(' in r_dict['type']:
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, i + 1) for i, r_dict in enumerate(rows)]
self._fields = Rec._fields
return all_rows
def get_primary_keys(self, table_name, echo=False):
"Get PK metadata for table"
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
schema, table = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [
get_rec(col, r_dict['name'], i + 1)
for i, col in enumerate(r_dict['constrained_columns'])
]
return rows
def get_indexes(self, table_name, echo=False):
"Get indexes metadata for table"
Rec = namedtuple(
'Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
schema, table = self._split_schema_table(table_name)
    def get_rec(r_dict):
      # one record per indexed column, flattened from the inspector dict
      for i, col in enumerate(r_dict['column_names']):
        yield Rec(
          schema=schema,
          table=table,
          index_name=r_dict['name'],
          column_name=col,
          column_order=i + 1,
          unique=str(r_dict['unique']))
    sql_tmpl = self._template('metadata.indexes')
    if sql_tmpl:
      rows = self.query(sql_tmpl.format(table=table, schema=schema))
    else:
      self.get_engine(echo=echo)
      rows = self.engine_inspect.get_indexes(table, schema=schema)
      rows = [rec for r_dict in rows for rec in get_rec(r_dict)]
    return rows
def get_ddl(self, table_name, object_type=None, echo=True):
"Get ddl for table"
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
schema, table = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(
sql_tmpl.format(
schema=schema,
table=table,
obj_type=object_type,
))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = [Rec(ddl)] if ddl else []
self._fields = Rec._fields
return rows
def get_all_columns(self):
"Get all columns for all tables / views"
sql_tmpl = self._template('metadata.all_columns')
if not sql_tmpl:
raise Exception('get_all_columns not implemented for {}'.format(
self.type))
rows = self.query(sql_tmpl)
return rows
def get_all_tables(self, filter, as_sql=False):
"Get all tables / views"
sql_tmpl = self._template('metadata.all_tables')
if not sql_tmpl:
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return sql if as_sql else self.query(sql, echo=False)
def analyze_fields(self,
analysis,
table_name,
fields=[],
as_sql=False,
union=True,
expr_func_map={},
**kwargs):
"""Base function for field level analysis
expr_func_map: contains mapping for expression to SQL function to all fields
"""
if '.' not in table_name:
raise Exception("table_name must have schema and name in it with a '.'")
if analysis not in self.template_dict['analysis']:
raise Exception("'{}' not found in template for '{}'.".format(
analysis, self.type))
schema, table = self._split_schema_table(table_name)
# get field type
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if not fields:
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = 'function.' + expr_func_map[expr]
expr_func_map[expr] = ',\n'.join([
self._template(tmpl_path).format(field=field)
for field in [r.column_name for r in field_rows]
])
sep = ' \nunion all\n' if union else ' \n ;\n'
sql = sep.join([
self._template('analysis.' + analysis).format(
schema=schema,
field=field,
table=table,
type=field_type[field.lower()] if field else '',
**expr_func_map,
**kwargs) for field in fields
])
return sql if as_sql else self.query(sql, analysis, echo=False)
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
"""Base function for table level analysis"""
if analysis not in self.template_dict['analysis']:
raise Exception("'{}' not found in template for '{}'.".format(
analysis, self.type))
if not tables and 'schema' in kwargs:
# get all tables
      rows = self.get_objects(kwargs['schema'])
crt_obj = lambda r: struct(dict(schema=r.schema, table=r.object_name))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = lambda schema, table: struct(dict(schema=schema, table=table))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([
self._template('analysis.' + analysis).format(
schema=obj.schema, table=obj.table, **kwargs) for obj in objs
])
return sql if as_sql else self.query(sql, analysis, echo=False)
def analyze_join_match(self,
t1,
t2,
t1_field,
t2_field,
t1_filter='1=1',
t2_filter='1=1',
as_sql=False,
as_text=True,
lowercase=True):
def get_kwargs(t1, t2, t1_field, t2_field, t1_filter, t2_filter):
t1_field_arr = ['t1.' + f for f in t1_field.split(',')]
t2_field_arr = ['t2.' + f for f in t2_field.split(',')]
t1_field_concat = self._concat_fields(t1_field_arr, as_text=as_text)
t2_field_concat = self._concat_fields(t2_field_arr, as_text=as_text)
to_text = self._to_text
if lowercase:
conds = ' and '.join([
'lower({}) = lower({})'.format(to_text(f), to_text(t2_field_arr[i]))
for i, f in enumerate(t1_field_arr)
])
else:
conds = ' and '.join([
'{} = {}'.format(to_text(f), to_text(t2_field_arr[i]))
for i, f in enumerate(t1_field_arr)
])
t1_fields1 = t1_field
t2_fields1 = t2_field
t1_field = ', '.join(['t1.' + f for f in t1_field_arr])
t2_field = ', '.join(['t2.' + f for f in t2_field_arr])
return dict(
t1=t1,
t1_field=t1_field_concat,
t1_fields1=t1_fields1,
t1_filter=t1_filter,
t2=t2,
t2_field=t2_field_concat,
t2_fields1=t2_fields1,
t2_filter=t2_filter,
conds=conds,
)
kwargs = get_kwargs(
t1=t1,
t2=t2,
t1_field=t1_field,
t2_field=t2_field,
t1_filter=t1_filter,
t2_filter=t2_filter,
)
sql = self.analyze_fields(
'table_join_match', t1, [''], as_sql=True, **kwargs)
return sql if as_sql else self.query(sql, 'table_join_match', echo=False)
def get_conn(db,
dbs=None,
echo=True,
reconnect=False,
use_jdbc=False,
conn_expire_min=10,
spark_hive=False) -> DBConn:
global conns
dbs = dbs if dbs else get_databases()
profile = get_profile()
db_dict = struct(dbs[db])
if db_dict.type.lower() == 'hive' and spark_hive:
db_dict.type = 'spark'
use_jdbc = True if (use_jdbc or ('use_jdbc' in db_dict
and db_dict['use_jdbc'])) else use_jdbc
if db in conns and not reconnect:
if (now() - conns[db].last_connect).total_seconds() / 60 < conn_expire_min:
return conns[db]
if use_jdbc:
log('*USING JDBC for ' + db)
from .jdbc import JdbcConn
conn = JdbcConn(db_dict, profile=profile)
elif db_dict.type.lower() == 'oracle':
from .oracle import OracleConn
conn = OracleConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'spark':
from .spark import SparkConn
conn = SparkConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'hive':
from .hive import HiveConn, Beeline
if 'use_beeline' in db_dict and db_dict.use_beeline:
conn = Beeline(db_dict, echo=echo)
else:
conn = HiveConn(db_dict, echo=echo)
elif db_dict.type.lower() in ('postgresql', 'redshift'):
from .postgresql import PostgreSQLConn
conn = PostgreSQLConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'sqlserver':
from .sqlserver import SQLServerConn
conn = SQLServerConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'sqlite':
from .sqlite import SQLiteConn
conn = SQLiteConn(db_dict, echo=echo)
else:
raise Exception(f'Type {db_dict.type} not handled!')
conns[db] = conn
return conn
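# Illustrative usage sketch (not part of the original module). The 'PG1' database
# alias below is an assumption; it must exist in the profile/databases YAML.
def _example_get_conn():
  """Hypothetical example: grab a cached connection and run a simple query."""
  conn = get_conn('PG1')  # returns the DBConn subclass matching the profile 'type'
  rows = conn.query('select 1 as one', echo=False)  # list of namedtuples by default
  return rows[0].one if rows else None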
class SqlX:
"""
SQL Express functions. Supports CRUD transactional operations.
Suppose there is a table named 'cache', sqlx allows:
sqlx.x('cache').insert(rows)
sqlx.x('cache').insert_one(row)
sqlx.x('cache').add(**kws)
sqlx.x('cache').delete(where)
sqlx.x('cache').update(rows, pk_fields)
sqlx.x('cache').update_one(row, pk_cols)
sqlx.x('cache').replace(rows, pk_fields)
sqlx.x('cache').query(where)
sqlx.x('cache').select_one(where)
"""
def __init__(self, conn: DBConn, table, schema, ntRec: namedtuple):
self.conn = conn
self.table = table
self.schema = schema
self.ntRec = ntRec
self.pk_fields = None
self.table_obj = schema + '.' + table if schema else table
self.insert_one = lambda row: self.insert([row])
self.add = lambda **kws: self.insert([self.ntRec(**kws)])
self.update_one = lambda row, pk_cols=None: self.update([row], pk_cols)
self.update_rec=lambda pk_cols=None, **kws: self.update([make_rec(**kws)], pk_cols)
self.replace_one = lambda row, pk_cols=None: self.replace([row], pk_cols)
self.replace_rec=lambda pk_cols=None, **kws: self.replace([make_rec(**kws)], pk_cols)
# self.select_one = lambda where: self.select_one(where, one=True)
def _get_pk(self):
if not self.pk_fields:
pk_rows = self.conn.get_primary_keys(self.table_obj)
self.pk_fields = [r.column_name for r in pk_rows]
return self.pk_fields
def insert(self, data):
return self.conn.insert(self.table_obj, data)
def update(self, data, pk_fields=None):
if not pk_fields:
pk_fields = self._get_pk()
if not pk_fields:
raise Exception("Need Keys to perform UPDATE!")
t_fields = [x.lower() for x in data[0]._fields]
for f in pk_fields:
if not f.lower() in t_fields:
# if keys not provided, need to make sure PK values are provided in data records
raise Exception(
"Value of PK field '{}' must be provided to perform UPDATE!".
format(f))
self.conn.update(self.table_obj, data, pk_fields, echo=False)
def update_one(self, row, pk_cols=None):
self.update([row], pk_cols)
def update_rec(self, pk_cols=None, **kws):
self.update([make_rec(**kws)], pk_cols)
def replace(self, data, pk_fields=None):
if not pk_fields:
pk_fields = self._get_pk()
self.conn.replace(self.table_obj, data, pk_fields, echo=False)
# def replace_rec(self, pk_cols=None, **kws):
# # add default None?
# for field in self.ntRec._fields:
# kws[field] = kws.get(field, None)
# self.replace([self.ntRec(**kws)], pk_cols)
def query(self, where='1=1', one=False, limit=None, as_dict=False):
rows = self.conn.query(
"select * from {} where {}".format(self.table_obj, where),
echo=False,
limit=limit)
rows = rows_to_dicts(rows) if as_dict else rows
if one: return rows[0] if rows else None
else: return rows
def select_one(self, where, field=None, as_dict=False):
row = self.query(where, one=True, as_dict=as_dict)
if field and row:
return row[field] if as_dict else row.__getattribute__(field)
return row
def delete(self, where):
self.conn.execute("delete from {} where {}".format(self.table_obj, where))
def make_sqlx(conn, schema, tables):
"Make sqlx lookup function for given tables"
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
# return table_func_map
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if not obj:
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx
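# Illustrative sketch (not part of the original module): wiring make_sqlx to tables
# reflected with SQLAlchemy. The 'PG1' alias, 'public' schema and 'cache' table are
# assumptions for demonstration only.
def _example_make_sqlx():
  """Hypothetical example: build a sqlx lookup over reflected tables."""
  conn = get_conn('PG1')
  meta = sqlalchemy.MetaData()
  meta.reflect(bind=conn.get_engine(), schema='public')
  sqlx = make_sqlx(conn, 'public', {t.name: t for t in meta.tables.values()})
  return sqlx('cache').query('1=1', limit=10)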
def get_sql_sources(sql_text, echo=False):
"""Obtain the source tables of a query
"""
import sqlparse
# replace "as(" to "as (" # this trips up the sql parser in CTEs
sql_text = re.sub(r"as\(", "as (", sql_text, 0, re.MULTILINE | re.IGNORECASE)
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while not done:
for tok in statement.tokens:
if tok.is_group:
if cte_mode and isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if (last_kw_from or last_kw_join) and last_tok.is_whitespace:
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier) and '(' in tok2.value:
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok2, sqlparse.sql.Identifier) and tok2.normalized.lower() not in cte_aliases:
if echo: log('+Table = ' + tok2.normalized.lower())
sources_dict[tok2.normalized.lower()] = tok.parent
elif isinstance(tok, sqlparse.sql.Identifier) and tok.normalized.lower() not in cte_aliases:
if echo: log('+Table = ' + tok.normalized.lower())
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if tok.is_keyword and tok.normalized == 'WITH':
cte_mode = True
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'GROUP':
last_kw_join = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'WHERE':
last_kw_join = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'ORDER':
last_kw_join = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'CREATE':
cte_mode = True
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'SELECT':
cte_mode = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'FROM':
last_kw_from = True
elif tok.is_keyword and 'JOIN' in tok.normalized:
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for s, statement in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table:
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo: log('-CREATE TABLE ' + create_table)
if tok.is_keyword and tok.normalized == 'TABLE' and last_kw_create:
last_kw_create_table = True
if tok.is_keyword and tok.normalized == 'CREATE':
last_kw_create = True
if tok.is_keyword and tok.normalized == 'FROM':
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources
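# Illustrative sketch (not part of the original module): get_sql_sources maps each
# created table (or statement index) to the source tables it reads from. The SQL
# below is made-up demo text.
if __name__ == '__main__':
  demo_sql = '''
  create table analytics.daily_sales as
  select o.order_id, c.name
  from sales.orders o
  join sales.customers c on c.id = o.customer_id
  '''
  print(get_sql_sources(demo_sql))  # prints the inferred source tables per created table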
| 31.146559 | 117 | 0.609395 | [
"MIT"
] | flarco/n1slutil | xutil/database/base.py | 38,466 | Python |
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from glob import glob
from typing import List
import airflow
from airflow.configuration import default_config_yaml
try:
import sphinx_airflow_theme # pylint: disable=unused-import
airflow_theme_is_available = True
except ImportError:
airflow_theme_is_available = False
# Hack to allow a piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of utils.apply_default, which was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================
# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
# General information about the project.
project = 'Airflow'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinxcontrib.jinja',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'redirects',
'providers_packages_ref',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
"sphinxcontrib.spelling",
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str] = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
# We have custom page - operators-and-hooks-ref.rst
'_api/airflow/providers/index.rst',
# Packages with subpackages
"_api/airflow/providers/microsoft/index.rst",
"_api/airflow/providers/apache/index.rst",
"_api/airflow/providers/cncf/index.rst",
# Templates or partials
'autoapi_templates',
'howto/operator/google/_partials',
'howto/operator/microsoft/_partials',
]
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def _get_rst_filepath_from_path(filepath: str):
if os.path.isdir(filepath):
result = filepath
elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
result = filepath.rpartition("/")[0]
else:
result = filepath.rpartition(".")[0]
result += "/index.rst"
result = f"_api/{os.path.relpath(result, ROOT_DIR)}"
return result
# Exclude top-level packages
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
for path in glob(f"{ROOT_DIR}/airflow/*"):
name = os.path.basename(path)
if os.path.isfile(path) and not path.endswith(_allowed_top_level):
exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
if os.path.isdir(path) and name not in browsable_packages:
exclude_patterns.append(f"_api/airflow/{name}")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
if airflow_theme_is_available:
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']
# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
}
if airflow_theme_is_available:
html_context = {
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': '/docs/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'master',
'display_github': 'master',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains the locations and names of other projects that should
# be linked to in this documentation.
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'celery': ('https://docs.celeryproject.org/en/stable/', None),
'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
'jinja2': ('https://jinja.palletsprojects.com/en/master/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('https://requests.readthedocs.io/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
# google-api
'google-api-core': ('https://googleapis.dev/python/google-api-core/latest', None),
'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
'google-cloud-datacatalog': ('https://googleapis.dev/python/datacatalog/latest', None),
'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
'google-cloud-monitoring': ('https://googleapis.dev/python/monitoring/latest', None),
'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
os.path.abspath('../airflow'),
]
# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'*/airflow/kubernetes/kubernetes_request_factory/*',
'*/_internal*',
'*/airflow/**/providers/**/utils/*',
'*/node_modules/*',
'*/example_dags/*',
'*/migrations/*',
]
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
OPENAPI_FILE = os.path.join(os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml")
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/[email protected]/bundles/redoc.standalone.js"
| 38.009456 | 138 | 0.678505 | ["Apache-2.0"] | AndersonReyes/airflow | docs/conf.py | 16,080 | Python |
from django.db import models
class Todo(models.Model):
name = models.CharField(max_length=200)
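# Illustrative ORM usage (added; not part of the original file):
#
#   todo = Todo.objects.create(name="write docs")
#   Todo.objects.filter(name__icontains="docs").count()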
| 16.833333 | 43 | 0.752475 | ["BSD-3-Clause"] | jgerigmeyer/jquery-django-superformset | demo/demo/todos/models.py | 101 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
"""RoutesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Route"]
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteListResult"]
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
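# Illustrative usage sketch (added for clarity; not part of the generated file).
# The client construction below is an assumption based on track-2 SDK conventions:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
#   poller = client.routes.begin_create_or_update(
#       "my-rg", "my-route-table", "my-route",
#       {"address_prefix": "10.1.0.0/24", "next_hop_type": "VnetLocal"},
#   )
#   route = poller.result()  # blocks until the long-running operation completes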
| 48.290249 | 210 | 0.656884 | ["MIT"] | 4thel00z/microsoft-crap-that-doesnt-work | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py | 21,296 | Python |
import pytest
import pandas as pd
from sklearn.model_selection import train_test_split
TEST_SIZE = 0.33
RANDOM_STATE = 42
@pytest.fixture(scope="module")
def binary_dataset():
df = pd.read_csv("./resources/heart.csv")
features = df.iloc[0:, :-1]
labels = df.iloc[0:, -1].values.ravel()
    X_train, X_test, y_train, _ = train_test_split(
features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
return X_train, X_test, y_train
@pytest.fixture(scope="module")
def multiclass_dataset():
df = pd.read_csv("./resources/glass.csv")
features = df.iloc[0:, :-1]
labels = df.iloc[0:, -1].values.ravel()
X_train, X_test, y_train, _ = train_test_split(
features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
return X_train, X_test, y_train
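# Minimal usage sketch (hypothetical; not part of the original suite): a test can
# request either fixture by name and unpack the pre-split data, e.g.
#
#   def test_split_shapes(binary_dataset):
#       X_train, X_test, y_train = binary_dataset
#       assert len(X_train) == len(y_train)
#       assert len(X_test) > 0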
| 25.030303 | 72 | 0.691283 | ["BSD-3-Clause"] | mdietrichstein/skpredict | tests/svm/conftest.py | 826 | Python |
"""
Makes python 2 behave more like python 3.
Ideally we import this globally so all our python 2 interpreters will assist in spotting errors early.
"""
# future imports are harmless if they implement behaviour that already exists in the current interpreter version
from __future__ import absolute_import, division, print_function
import sys
from collections import OrderedDict
if sys.version_info.major == 2:
# Override dict and make items() behave like iteritems() to retain performance
class dict(dict):
def items(self):
return super(dict, self).iteritems()
def keys(self):
return super(dict, self).iterkeys()
def values(self):
return super(dict, self).itervalues()
class OrderedDict(OrderedDict):
def items(self):
return super(OrderedDict, self).iteritems()
def keys(self):
return super(OrderedDict, self).iterkeys()
def values(self):
return super(OrderedDict, self).itervalues()
# Override range with xrange to mimic python3's range
range = xrange
import cStringIO as io
else:
unicode = str
long = int
import io
try:
from typing import *
T = TypeVar('T')
except:
pass
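# Hedged usage sketch (the import path is an assumption based on this file's
# location): consumers typically star-import this module so the overrides apply on
# Python 2 while Python 3 keeps its native builtins untouched.
#
#   from errorCheckTool.py23 import *  # noqa: F401,F403
#
#   for i in range(10 ** 6):            # lazy iteration on both interpreters
#       pass
#   for k, v in dict(a=1).items():      # no intermediate list on either interpreter
#       pass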
| 27.755556 | 112 | 0.670136 | ["MIT"] | peerke88/error-check-tool | errorCheckTool/py23.py | 1,249 | Python |
import rebound
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
from sherlockpipe.nbodies.PlanetInput import PlanetInput
class StabilityCalculator:
def __init__(self, star_mass):
self.star_mass = star_mass
def mass_from_radius(self, radius):
return radius ** (1 / 0.55) if radius <= 12.1 else radius ** (1 / 0.01)
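    # Worked example (added note): a planet with R = 2 Earth radii falls in the
    # R <= 12.1 branch above, giving M = 2 ** (1 / 0.55) ≈ 3.5 Earth masses;
    # ``run`` then multiplies by 0.000003003 (Earth masses -> solar masses) and
    # divides by the stellar mass, since the central star is added with m=1.0.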
def run(self, planet_params):
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.ri_whfast.safe_mode = 0
sim.dt = 1e-2
sim.add(m=1.0)
for planet_param in planet_params:
sim.add(m=self.mass_from_radius(planet_param.r) * 0.000003003 / self.star_mass, P=planet_param.P, e=planet_param.e, omega=planet_param.omega)
#sim.status()
sim.move_to_com()
sim.init_megno()
sim.exit_max_distance = 20.
try:
sim.integrate(5e2 * 2. * np.pi, exact_finish_time=0) # integrate for 500 years, integrating to the nearest
# for i in range(500):
# sim.integrate(sim.t + i * 2 * np.pi)
# fig, ax = rebound.OrbitPlot(sim, color=True, unitlabel="[AU]", xlim=[-0.1, 0.1], ylim=[-0.1, 0.1])
# plt.show()
# plt.close(fig)
#clear_output(wait=True)
#timestep for each output to keep the timestep constant and preserve WHFast's symplectic nature
megno = sim.calculate_megno()
megno = megno if megno < 10 else 10
return megno
except rebound.Escape:
return 10. # At least one particle got ejected, returning large MEGNO
planet_params = []
parameters = []
# grid = 20
# par_e = np.linspace(0.0, 0.7, grid)
# par_e1 = np.linspace(0.0, 0.7, grid)
# for i in par_e:
# for j in par_e1:
# parameters.append((PlanetInput(1.74542, 0.01606, 1.12207, 0), PlanetInput(0.03088, 2.97, j)))
from rebound.interruptible_pool import InterruptiblePool
parameters.append(PlanetInput(5.43440, 1.68792, 0))
parameters.append(PlanetInput(1.74542, 1.12207, 0))
parameters.append(PlanetInput(4.02382, 1.34990, 0))
parameters.append(PlanetInput(2.8611, 1.17643, 0))
parameters.append(PlanetInput(1.58834, 1.07459, 0))
result = StabilityCalculator(0.211299).run(parameters)
print("MEGNO: " + str(result))
# pool = InterruptiblePool()
# results = pool.map(StabilityCalculator(0.211299).run, parameters)
# results2d = np.array(results).reshape(grid, grid)
# fig = plt.figure(figsize=(7, 5))
# ax = plt.subplot(111)
# extent = [min(par_e), max(par_e), min(par_e1), max(par_e1)]
# ax.set_xlim(extent[0], extent[1])
# ax.set_xlabel("ecc1 $e$")
# ax.set_ylim(extent[2], extent[3])
# ax.set_ylabel("ecc2 $e1$")
# im = ax.imshow(results2d, interpolation="none", vmin=1.9, vmax=10, cmap="RdYlGn_r", origin="lower", aspect='auto', extent=extent)
# cb = plt.colorbar(im, ax=ax)
# cb.set_label("MEGNO $\\langle Y \\rangle$")
# plt.show()
| 39.44 | 153 | 0.649763 | ["MIT"] | LuisCerdenoMota/SHERLOCK | experimental/megno.py | 2,958 | Python |
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.devices.Device import *
from acq4.util import Qt
import acq4.util.Mutex as Mutex
from collections import OrderedDict
class LightSource(Device):
"""Device tracking the state and properties of multiple illumination sources.
"""
# emitted when the on/off status of a light changes
sigLightChanged = Qt.Signal(object, object) # self, light_name
def __init__(self, dm, config, name):
Device.__init__(self, dm, config, name)
self._sources = OrderedDict() # [name: {'active': bool, 'wavelength': float, 'power': float, ...}, ...]
self._lock = Mutex.Mutex()
def addSource(self, name, conf):
self._sources[name] = conf
if 'xkey' in conf:
devname, row, col = self._sources[name]['xkey']
dev = self.dm.getDevice(devname)
dev.addKeyCallback((row, col), self._hotkeyPressed, (name,))
def describe(self, onlyActive=True):
"""Return a description of the current state of all active light sources.
If onlyActive is False, then information for all sources will be returned, whether or not they are active.
"""
if onlyActive:
return OrderedDict([(n,s) for n,s in self._sources.items() if s['active']])
else:
return self._sources.copy()
def activeSources(self):
"""Return the names of all active light sources.
"""
        return [name for name, conf in self._sources.items() if conf['active']]
def sourceActive(self, name):
"""Return True if the named light source is currently active.
"""
return self._sources[name]['active']
def setSourceActive(self, name, active):
"""Activate / deactivate a light source.
"""
raise NotImplementedError()
def _updateXkeyLight(self, name):
if 'xkey' in self._sources[name]:
devname, row, col = self._sources[name]['xkey']
dev = self.dm.getDevice(devname)
bl = dev.getBacklights()
bl[row,col] = int(self._sources[name]['active'])
dev.setBacklights(bl)
def _hotkeyPressed(self, dev, changes, name):
self.setSourceActive(name, not self.sourceActive(name))
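# Illustrative subclass sketch (added; not part of the original module): a concrete
# device would override ``setSourceActive`` and emit the change signal, e.g.
#
#   class MyLEDBank(LightSource):
#       def setSourceActive(self, name, active):
#           self._sources[name]['active'] = bool(active)
#           self._updateXkeyLight(name)
#           self.sigLightChanged.emit(self, name)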
| 36.645161 | 114 | 0.62632 | ["MIT"] | RonnyBergmann/acq4 | acq4/devices/LightSource/LightSource.py | 2,272 | Python |
import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
from clvm.casts import int_from_bytes
from kujenga.consensus.block_body_validation import validate_block_body
from kujenga.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from kujenga.consensus.block_record import BlockRecord
from kujenga.consensus.blockchain_interface import BlockchainInterface
from kujenga.consensus.constants import ConsensusConstants
from kujenga.consensus.cost_calculator import NPCResult
from kujenga.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from kujenga.consensus.find_fork_point import find_fork_point_in_chain
from kujenga.consensus.full_block_to_block_record import block_to_block_record
from kujenga.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from kujenga.full_node.block_store import BlockStore
from kujenga.full_node.coin_store import CoinStore
from kujenga.full_node.hint_store import HintStore
from kujenga.full_node.mempool_check_conditions import get_name_puzzle_conditions
from kujenga.types.blockchain_format.coin import Coin
from kujenga.types.blockchain_format.sized_bytes import bytes32
from kujenga.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from kujenga.types.blockchain_format.vdf import VDFInfo
from kujenga.types.coin_record import CoinRecord
from kujenga.types.condition_opcodes import ConditionOpcode
from kujenga.types.end_of_slot_bundle import EndOfSubSlotBundle
from kujenga.types.full_block import FullBlock
from kujenga.types.generator_types import BlockGenerator, GeneratorArg
from kujenga.types.header_block import HeaderBlock
from kujenga.types.unfinished_block import UnfinishedBlock
from kujenga.types.unfinished_header_block import UnfinishedHeaderBlock
from kujenga.types.weight_proof import SubEpochChallengeSegment
from kujenga.util.errors import Err
from kujenga.util.generator_tools import get_block_header, tx_removals_and_additions
from kujenga.util.ints import uint16, uint32, uint64, uint128
from kujenga.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
"""
When Blockchain.receive_block(b) is called, one of these results is returned,
showing whether the block was added to the chain (extending the peak),
and if not, why it was not added.
"""
NEW_PEAK = 1 # Added to the peak of the blockchain
ADDED_AS_ORPHAN = 2 # Added as an orphan/stale block (not a new peak of the chain)
INVALID_BLOCK = 3 # Block was not added because it was invalid
ALREADY_HAVE_BLOCK = 4 # Block is already present in this blockchain
DISCONNECTED_BLOCK = 5 # Block's parent (previous pointer) is not in this blockchain
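# Illustrative sketch (added; not part of the original module): callers typically
# branch on this enum after ``Blockchain.receive_block``, e.g.
#
#   result, error, fork_height, _ = await blockchain.receive_block(block)
#   if result == ReceiveBlockResult.NEW_PEAK:
#       ...  # broadcast the new peak to peers
#   elif result == ReceiveBlockResult.INVALID_BLOCK:
#       ...  # log ``error`` and reject the block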
class Blockchain(BlockchainInterface):
constants: ConsensusConstants
constants_json: Dict
# peak of the blockchain
_peak_height: Optional[uint32]
# All blocks in peak path are guaranteed to be included, can include orphan blocks
__block_records: Dict[bytes32, BlockRecord]
# all hashes of blocks in block_record by height, used for garbage collection
__heights_in_cache: Dict[uint32, Set[bytes32]]
# Defines the path from genesis to the peak, no orphan blocks
__height_to_hash: Dict[uint32, bytes32]
# All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
# (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
__sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
# Unspent Store
coin_store: CoinStore
# Store
block_store: BlockStore
# Used to verify blocks in parallel
pool: ProcessPoolExecutor
# Set holding seen compact proofs, in order to avoid duplicates.
_seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
# Whether blockchain is shut down or not
_shut_down: bool
# Lock to prevent simultaneous reads and writes
lock: asyncio.Lock
compact_proof_lock: asyncio.Lock
hint_store: HintStore
@staticmethod
async def create(
coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore
):
"""
Initializes a blockchain with the BlockRecords from disk, assuming they have all been
validated. Uses the genesis block given in override_constants, or as a fallback,
in the consensus constants config.
"""
self = Blockchain()
self.lock = asyncio.Lock() # External lock handled by full node
self.compact_proof_lock = asyncio.Lock()
cpu_count = multiprocessing.cpu_count()
if cpu_count > 61:
cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903
num_workers = max(cpu_count - 2, 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f"Started {num_workers} processes for block validation")
self.constants = consensus_constants
self.coin_store = coin_store
self.block_store = block_store
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self._shut_down = False
await self._load_chain_from_store()
self._seen_compact_proofs = set()
self.hint_store = hint_store
return self
def shut_down(self):
self._shut_down = True
self.pool.shutdown(wait=True)
async def _load_chain_from_store(self) -> None:
"""
Initializes the state of the Blockchain class from the database.
"""
height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_height_dicts()
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
for block in block_records.values():
self.add_block_record(block)
if len(block_records) == 0:
assert peak is None
self._peak_height = None
return None
assert peak is not None
self._peak_height = self.block_record(peak).height
assert len(self.__height_to_hash) == self._peak_height + 1
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def get_full_peak(self) -> Optional[FullBlock]:
if self._peak_height is None:
return None
""" Return list of FullBlocks that are peaks"""
block = await self.block_store.get_full_block(self.height_to_hash(self._peak_height))
assert block is not None
return block
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
return await self.block_store.get_full_block(header_hash)
async def receive_block(
self,
block: FullBlock,
pre_validation_result: Optional[PreValidationResult] = None,
fork_point_with_peak: Optional[uint32] = None,
) -> Tuple[
ReceiveBlockResult,
Optional[Err],
Optional[uint32],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
"""
This method must be called under the blockchain lock
Adds a new block into the blockchain, if it's valid and connected to the current
blockchain, regardless of whether it is the child of a head, or another block.
Returns a header if block is added to head. Returns an error if the block is
invalid. Also returns the fork height, in the case of a new peak.
"""
genesis: bool = block.height == 0
if self.contains_block(block.header_hash):
return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {})
if not self.contains_block(block.prev_header_hash) and not genesis:
return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {})
npc_result: Optional[NPCResult] = None
if pre_validation_result is None:
if block.height == 0:
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(block.finished_sub_slots) > 0, prev_b, self
)
if block.is_transaction_block():
if block.transactions_generator is not None:
try:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
except ValueError:
return ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {})
assert block_generator is not None and block.transactions_info is not None
npc_result = get_name_puzzle_conditions(
block_generator,
min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
removals, tx_additions = [], []
header_block = get_block_header(block, tx_additions, removals)
else:
npc_result = None
header_block = get_block_header(block, [], [])
required_iters, error = validate_finished_header_block(
self.constants,
self,
header_block,
False,
difficulty,
sub_slot_iters,
)
if error is not None:
return ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {})
else:
npc_result = pre_validation_result.npc_result
required_iters = pre_validation_result.required_iters
assert pre_validation_result.error is None
assert required_iters is not None
error_code, _ = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
block.height,
npc_result,
fork_point_with_peak,
self.get_block_generator,
)
if error_code is not None:
return ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {})
block_record = block_to_block_record(
self.constants,
self,
required_iters,
block,
None,
)
# Always add the block to the database
async with self.block_store.db_wrapper.lock:
try:
header_hash: bytes32 = block.header_hash
# Perform the DB operations to update the state, and rollback if something goes wrong
await self.block_store.db_wrapper.begin_transaction()
await self.block_store.add_full_block(header_hash, block, block_record)
fork_height, peak_height, records, (coin_record_change, hint_changes) = await self._reconsider_peak(
block_record, genesis, fork_point_with_peak, npc_result
)
await self.block_store.db_wrapper.commit_transaction()
# Then update the memory cache. It is important that this task is not cancelled and does not throw
self.add_block_record(block_record)
for fetched_block_record in records:
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
if fetched_block_record.sub_epoch_summary_included is not None:
self.__sub_epoch_summaries[
fetched_block_record.height
] = fetched_block_record.sub_epoch_summary_included
if peak_height is not None:
self._peak_height = peak_height
except BaseException:
self.block_store.rollback_cache_block(header_hash)
await self.block_store.db_wrapper.rollback_transaction()
raise
if fork_height is not None:
# new coin records added
assert coin_record_change is not None
return ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes)
else:
return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {})
def get_hint_list(self, npc_result: NPCResult) -> List[Tuple[bytes32, bytes]]:
h_list = []
for npc in npc_result.npc_list:
for opcode, conditions in npc.conditions:
if opcode == ConditionOpcode.CREATE_COIN:
for condition in conditions:
if len(condition.vars) > 2 and condition.vars[2] != b"":
puzzle_hash, amount_bin = condition.vars[0], condition.vars[1]
amount = int_from_bytes(amount_bin)
coin_id = Coin(npc.coin_name, puzzle_hash, amount).name()
h_list.append((coin_id, condition.vars[2]))
return h_list
async def _reconsider_peak(
self,
block_record: BlockRecord,
genesis: bool,
fork_point_with_peak: Optional[uint32],
npc_result: Optional[NPCResult],
) -> Tuple[
Optional[uint32],
Optional[uint32],
List[BlockRecord],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
"""
When a new block is added, this is called, to check if the new block is the new peak of the chain.
This also handles reorgs by reverting blocks which are not in the heaviest chain.
It returns the height of the fork between the previous chain and the new chain, or returns
None if there was no update to the heaviest chain.
"""
peak = self.get_peak()
lastest_coin_state: Dict[bytes32, CoinRecord] = {}
hint_coin_state: Dict[bytes32, Dict[bytes32, CoinRecord]] = {}
if genesis:
if peak is None:
block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
assert block is not None
if npc_result is not None:
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
tx_removals, tx_additions = [], []
if block.is_transaction_block():
assert block.foliage_transaction_block is not None
added = await self.coin_store.new_block(
block.height,
block.foliage_transaction_block.timestamp,
block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
else:
added, _ = [], []
await self.block_store.set_peak(block_record.header_hash)
return uint32(0), uint32(0), [block_record], (added, {})
return None, None, [], ([], {})
assert peak is not None
if block_record.weight > peak.weight:
# Find the fork. if the block is just being appended, it will return the peak
# If no blocks in common, returns -1, and reverts all blocks
if block_record.prev_hash == peak.header_hash:
fork_height: int = peak.height
elif fork_point_with_peak is not None:
fork_height = fork_point_with_peak
else:
fork_height = find_fork_point_in_chain(self, block_record, peak)
if block_record.prev_hash != peak.header_hash:
roll_changes: List[CoinRecord] = await self.coin_store.rollback_to_block(fork_height)
for coin_record in roll_changes:
lastest_coin_state[coin_record.name] = coin_record
# Rollback sub_epoch_summaries
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if ses_included_height > fork_height:
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
log.info(f"delete ses at height {height}")
del self.__sub_epoch_summaries[height]
# Collect all blocks from fork point to new peak
blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
curr = block_record.header_hash
while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
assert fetched_full_block is not None
assert fetched_block_record is not None
blocks_to_add.append((fetched_full_block, fetched_block_record))
if fetched_full_block.height == 0:
# Doing a full reorg, starting at height 0
break
curr = fetched_block_record.prev_hash
records_to_add = []
for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
records_to_add.append(fetched_block_record)
if fetched_full_block.is_transaction_block():
if fetched_block_record.header_hash == block_record.header_hash:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, npc_result
)
else:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, None
)
assert fetched_full_block.foliage_transaction_block is not None
added_rec = await self.coin_store.new_block(
fetched_full_block.height,
fetched_full_block.foliage_transaction_block.timestamp,
fetched_full_block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
removed_rec: List[Optional[CoinRecord]] = [
await self.coin_store.get_coin_record(name) for name in tx_removals
]
# Set additions first, then removals in order to handle ephemeral coin state
# Add in height order is also required
record: Optional[CoinRecord]
for record in added_rec:
assert record
lastest_coin_state[record.name] = record
for record in removed_rec:
assert record
lastest_coin_state[record.name] = record
if npc_res is not None:
hint_list: List[Tuple[bytes32, bytes]] = self.get_hint_list(npc_res)
await self.hint_store.add_hints(hint_list)
# There can be multiple coins for the same hint
for coin_id, hint in hint_list:
key = hint
if key not in hint_coin_state:
hint_coin_state[key] = {}
hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
# Changes the peak to be the new peak
await self.block_store.set_peak(block_record.header_hash)
return (
uint32(max(fork_height, 0)),
block_record.height,
records_to_add,
(list(lastest_coin_state.values()), hint_coin_state),
)
# This is not a heavier block than the heaviest we have seen, so we don't change the coin set
return None, None, [], ([], {})
async def get_tx_removals_and_additions(
self, block: FullBlock, npc_result: Optional[NPCResult] = None
) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
if block.is_transaction_block():
if block.transactions_generator is not None:
if npc_result is None:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
assert block_generator is not None
npc_result = get_name_puzzle_conditions(
block_generator,
self.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
return tx_removals, tx_additions, npc_result
else:
return [], [], None
else:
return [], [], None
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
async def get_sp_and_ip_sub_slots(
self, header_hash: bytes32
) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
if block is None:
return None
curr_br: BlockRecord = self.block_record(block.header_hash)
is_overflow = curr_br.overflow
curr: Optional[FullBlock] = block
assert curr is not None
while True:
if curr_br.first_in_sub_slot:
curr = await self.block_store.get_full_block(curr_br.header_hash)
assert curr is not None
break
if curr_br.height == 0:
break
curr_br = self.block_record(curr_br.prev_hash)
if len(curr.finished_sub_slots) == 0:
# This means we got to genesis and still no sub-slots
return None, None
ip_sub_slot = curr.finished_sub_slots[-1]
if not is_overflow:
# Pos sub-slot is the same as infusion sub slot
return None, ip_sub_slot
if len(curr.finished_sub_slots) > 1:
# Have both sub-slots
return curr.finished_sub_slots[-2], ip_sub_slot
prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
if prev_curr is None:
assert curr.height == 0
prev_curr = curr
prev_curr_br = self.block_record(curr.header_hash)
else:
prev_curr_br = self.block_record(curr.prev_header_hash)
assert prev_curr_br is not None
while prev_curr_br.height > 0:
if prev_curr_br.first_in_sub_slot:
prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
assert prev_curr is not None
break
prev_curr_br = self.block_record(prev_curr_br.prev_hash)
if len(prev_curr.finished_sub_slots) == 0:
return None, ip_sub_slot
return prev_curr.finished_sub_slots[-1], ip_sub_slot
def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
peak = self.get_peak()
if peak is None:
return []
recent_rc: List[Tuple[bytes32, uint128]] = []
curr: Optional[BlockRecord] = peak
while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
if curr != peak:
recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
if curr.first_in_sub_slot:
assert curr.finished_reward_slot_hashes is not None
sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
# Start from the most recent
for rc in reversed(curr.finished_reward_slot_hashes):
if sub_slot_total_iters < curr.sub_slot_iters:
break
recent_rc.append((rc, sub_slot_total_iters))
sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
curr = self.try_block_record(curr.prev_hash)
return list(reversed(recent_rc))
async def validate_unfinished_block(
self, block: UnfinishedBlock, skip_overflow_ss_validation=True
) -> PreValidationResult:
if (
not self.contains_block(block.prev_header_hash)
and not block.prev_header_hash == self.constants.GENESIS_CHALLENGE
):
return PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
)
required_iters, error = validate_unfinished_header_block(
self.constants,
self,
unfinished_header_block,
False,
difficulty,
sub_slot_iters,
skip_overflow_ss_validation,
)
if error is not None:
return PreValidationResult(uint16(error.code.value), None, None)
prev_height = (
-1
if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
else self.block_record(block.prev_header_hash).height
)
npc_result = None
if block.transactions_generator is not None:
assert block.transactions_info is not None
try:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
except ValueError:
return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
if block_generator is None:
return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
npc_result = get_name_puzzle_conditions(
block_generator,
min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
error_code, cost_result = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
uint32(prev_height + 1),
npc_result,
None,
self.get_block_generator,
)
if error_code is not None:
return PreValidationResult(uint16(error_code.value), None, None)
return PreValidationResult(None, required_iters, cost_result)
async def pre_validate_blocks_multiprocessing(
self,
blocks: List[FullBlock],
npc_results: Dict[uint32, NPCResult],
batch_size: int = 4,
wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
return await pre_validate_blocks_multiprocessing(
self.constants,
self.constants_json,
self,
blocks,
self.pool,
True,
npc_results,
self.get_block_generator,
batch_size,
wp_summaries,
)
def contains_block(self, header_hash: bytes32) -> bool:
"""
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
"""
return header_hash in self.__block_records
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self.__sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self.__sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
return self.__height_to_hash[height]
def contains_height(self, height: uint32) -> bool:
return height in self.__height_to_hash
def get_peak_height(self) -> Optional[uint32]:
return self._peak_height
async def warmup(self, fork_point: uint32):
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
"""
if self._peak_height is None:
return None
block_records = await self.block_store.get_block_records_in_range(
max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
)
for block_record in block_records.values():
self.add_block_record(block_record)
def clean_block_record(self, height: int):
"""
Clears all block records in the cache which have block_record < height.
Args:
height: Minimum height that we need to keep in the cache
"""
if height < 0:
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while blocks_to_remove is not None and height >= 0:
for header_hash in blocks_to_remove:
del self.__block_records[header_hash] # remove from blocks
del self.__heights_in_cache[uint32(height)] # remove height from heights in cache
if height == 0:
break
height = height - 1
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
"""
Cleans the cache so that we only maintain relevant blocks. This removes
block records that have height < peak - BLOCKS_CACHE_SIZE.
These blocks are necessary for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
peak = self.get_peak()
assert peak is not None
if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE)
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
hashes = []
for height in range(start, stop + 1):
if self.contains_height(uint32(height)):
header_hash: bytes32 = self.height_to_hash(uint32(height))
hashes.append(header_hash)
blocks: List[FullBlock] = []
for hash in hashes.copy():
block = self.block_store.block_cache.get(hash)
if block is not None:
blocks.append(block)
hashes.remove(hash)
blocks_on_disk: List[FullBlock] = await self.block_store.get_blocks_by_hash(hashes)
blocks.extend(blocks_on_disk)
header_blocks: Dict[bytes32, HeaderBlock] = {}
for block in blocks:
if self.height_to_hash(block.height) != block.header_hash:
raise ValueError(f"Block at {block.header_hash} is no longer in the blockchain (it's in a fork)")
if tx_filter is False:
header = get_block_header(block, [], [])
else:
tx_additions: List[CoinRecord] = [
c for c in (await self.coin_store.get_coins_added_at_height(block.height)) if not c.coinbase
]
removed: List[CoinRecord] = await self.coin_store.get_coins_removed_at_height(block.height)
header = get_block_header(
block, [record.coin for record in tx_additions], [record.coin.name() for record in removed]
)
header_blocks[header.header_hash] = header
return header_blocks
async def get_header_block_by_height(
self, height: int, header_hash: bytes32, tx_filter: bool = True
) -> Optional[HeaderBlock]:
header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height, tx_filter)
if len(header_dict) == 0:
return None
if header_hash not in header_dict:
return None
return header_dict[header_hash]
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
"""
gets block records by height (only blocks that are part of the chain)
"""
records: List[BlockRecord] = []
hashes = []
assert batch_size < 999 # sqlite in python 3.7 has a limit on 999 variables in queries
for height in heights:
hashes.append(self.height_to_hash(height))
if len(hashes) > batch_size:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
hashes = []
if len(hashes) > 0:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
return records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
"""
Adds a block record to the cache.
"""
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
):
return await self.block_store.persist_sub_epoch_challenge_segments(ses_block_hash, segments)
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments: Optional[List[SubEpochChallengeSegment]] = await self.block_store.get_sub_epoch_challenge_segments(
ses_block_hash
)
if segments is None:
return None
return segments
# Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
pot_tuple = (vdf_info, height)
if pot_tuple in self._seen_compact_proofs:
return True
# Periodically cleanup to keep size small. TODO: make this smarter, like FIFO.
if len(self._seen_compact_proofs) > 10000:
self._seen_compact_proofs.clear()
self._seen_compact_proofs.add(pot_tuple)
return False
async def get_block_generator(
self, block: Union[FullBlock, UnfinishedBlock], additional_blocks=None
) -> Optional[BlockGenerator]:
if additional_blocks is None:
additional_blocks = {}
ref_list = block.transactions_generator_ref_list
if block.transactions_generator is None:
assert len(ref_list) == 0
return None
if len(ref_list) == 0:
return BlockGenerator(block.transactions_generator, [])
result: List[GeneratorArg] = []
previous_block_hash = block.prev_header_hash
if (
self.try_block_record(previous_block_hash)
and self.height_to_hash(self.block_record(previous_block_hash).height) == previous_block_hash
):
# We are not in a reorg, no need to look up alternate header hashes (we can get them from height_to_hash)
for ref_height in block.transactions_generator_ref_list:
header_hash = self.height_to_hash(ref_height)
ref_block = await self.get_full_block(header_hash)
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
else:
# First tries to find the blocks in additional_blocks
reorg_chain: Dict[uint32, FullBlock] = {}
curr: Union[FullBlock, UnfinishedBlock] = block
additional_height_dict = {}
while curr.prev_header_hash in additional_blocks:
prev: FullBlock = additional_blocks[curr.prev_header_hash]
additional_height_dict[prev.height] = prev
if isinstance(curr, FullBlock):
assert curr.height == prev.height + 1
reorg_chain[prev.height] = prev
curr = prev
peak: Optional[BlockRecord] = self.get_peak()
if self.contains_block(curr.prev_header_hash) and peak is not None:
# Then we look up blocks up to fork point one at a time, backtracking
previous_block_hash = curr.prev_header_hash
prev_block_record = await self.block_store.get_block_record(previous_block_hash)
prev_block = await self.block_store.get_full_block(previous_block_hash)
assert prev_block is not None
assert prev_block_record is not None
fork = find_fork_point_in_chain(self, peak, prev_block_record)
curr_2: Optional[FullBlock] = prev_block
assert curr_2 is not None and isinstance(curr_2, FullBlock)
reorg_chain[curr_2.height] = curr_2
while curr_2.height > fork and curr_2.height > 0:
curr_2 = await self.block_store.get_full_block(curr_2.prev_header_hash)
assert curr_2 is not None
reorg_chain[curr_2.height] = curr_2
for ref_height in block.transactions_generator_ref_list:
if ref_height in reorg_chain:
ref_block = reorg_chain[ref_height]
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
else:
if ref_height in additional_height_dict:
ref_block = additional_height_dict[ref_height]
else:
header_hash = self.height_to_hash(ref_height)
ref_block = await self.get_full_block(header_hash)
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
assert len(result) == len(ref_list)
return BlockGenerator(block.transactions_generator, result)
| 45.399784 | 119 | 0.636396 | [
"Apache-2.0"
] | Kujenga-Network/kujenga-blockchain | kujenga/consensus/blockchain.py | 42,131 | Python |
class ListData():
def __init__(instance):
### INTERNAL PARAMETERS #############
instance.missing_data_character = " "
#####################################
instance.dataset = []
def headers(instance):
"""
Returns the first row of the instance.dataset
Returns:
List
"""
return instance.dataset[0]
def data_rows(instance):
"""
Returns the rows of the instance.dataset except the first rows.
Returns:
List
"""
return instance.dataset[1:len(instance.dataset)]
def import_csv_file(instance, input_file_path,
column_delimiter_pattern_in_input_file,
line_head_pattern_to_remove='',
line_tail_pattern_to_remove='',
cell_head_and_tail_characters_to_remove=''):
"""
Returns:
nothing
Examples:
>>> # Import a CSV file (yasgui.org formatting)
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//yasgui_output_100.csv',
... column_delimiter_pattern_in_input_file=' , ',
... line_tail_pattern_to_remove=' ,',
... cell_head_and_tail_characters_to_remove='"')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['publication_type', 'journal_article', 'title', 'publication_year', 'author_name', 'journal_name', 'journal_issue_number', 'journal_volume_number', 'startEndPages', 'publisher_name', 'doi']
----------------------------------LINE 2----------------------------------
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']
<BLANKLINE>
CSV file "test_data//yasgui_output_100.csv" is imported as ListData object.
>>> # Parse a one-column CSV file
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//one_column_data.csv',
... column_delimiter_pattern_in_input_file=',')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['doi', '']
----------------------------------LINE 2----------------------------------
['10.1163/187607508X384689', '']
<BLANKLINE>
CSV file "test_data//one_column_data.csv" is imported as ListData object.
>>> my_list_data.get_column_at_index(0)
['doi', '10.1163/187607508X384689', '10.1017/S0954579416000572', '10.1007/s11562-016-0353-7', '10.1016/j.adolescence.2016.09.008', '10.1186/s13561-016-0122-6', '10.1007/s00799-016-0182-6', '10.5194/gmd-2016-266', '10.1007/s00737-015-0531-2', '10.1103/RevModPhys.88.021003', 'https://doi.org/10.1101/167171', 'https://doi.org/10.1016/j.chb.2017.04.047', '10.1016/j.trb.2016.09.005', '10.1016/j.ancene.2016.01.001', '10.1111/adb.12322', '10.1017/njg.2016.45', '10.1080/1359432X.2016.1209489', '10.1117/1.JBO.21.6.066008', '10.5194/gmd-10-3329-2017', '10.1016/j.rser.2017.01.103', '10.1177/2050157916664559', '10.1007/978-3-319-45931-8_17', '10.1007/s11136-015-1171-8', '10.1145/2991079.2991121', '10.1093/cz/zow089', '10.1126/science.aac8167', '10.1007/s00586-016-4606-1', '10.1186/s12937-017-0229-6', '10.1007/s11357-016-9894-1', '10.1080/00130095.2015.1094371', '10.1016/j.epsl.2016.02.028', '10.1371/journal.pone.0168636', '10.1016/j.atmosres.2016.03.016', '10.1111/deci.12206', '10.1126/science.aad9634', '10.1103/PhysRevA.94.012506', '10.4103/0019-5545.196846', '10.1016/j.cedpsych.2017.01.006', '10.3324/haematol.2015.133470', '10.1057/978-1-137-50956-7', '10.1016/j.scico.2016.04.001', 'https://doi.org/10.1016/j.scico.2016.04.001', '10.1080/03081087.2015.1053425', '10.3758/s13423-017-1270-3', '10.1681/ASN.2015030287', '10.1016/j.avb.2016.05.006', '10.1177/0971333616689191', '10.1002/sej.1243', '10.1016/j.foreco.2017.06.023', '10.1103/PhysRevLett.118.071801', 'https://doi.org/10.1093/geront/gnv127', '10.1007/978-3-319-42324-1_16', '10.1109/JBHI.2015.2412656', '10.1016/j.jeem.2016.04.002', '10.1080/00207543.2015.1058982', '10.1038/mp.2016.100', '10.1080/03003930.2016.1194267', '10.1016/j.envint.2017.01.018', '10.1038/pr.2015.179', '10.1177/1753193416669263', '10.1016/j.tre.2016.11.003', '10.1021/acs.jpcc.5b12016', '10.1002/anie.201603510', '10.1073/pnas.1607005113', '(DOI) - 10.1111/cch.12521', '10.1017/S0016756815000886', '10.1080/1350293X.2015.1073507', '10.1152/jn.00701.2015', '10.1371/journal.pone.0170791', '10.1016/j.seares.2016.07.005', '10.1016/j.reseneeco.2016.03.003', '10.1007/s00531-017-1499-0', '10.1007/s41669-017-0014-7', '10.1093/acrefore/9780190228613.013.439', '10.14814/phy2.13201', '10.1016/j.jtrangeo.2016.10.013', '10.1523/JNEUROSCI.3658-16.2017', '10.1192/bjpo.bp.115.000166', '10.1136/bmjgh-2016-000109', '10.7554/eLife.20320.001', '10.1037/pas0000332', '10.1177/1474704916673841', '10.1057/978-1-137-58179-2', '10.1002/ejp.963', '10.1017/thg.2016.78', '10.1038/tpj.2016.32', '10.1016/j.jesp.2017.03.008', '10.1287/trsc.2015.0647', '10.1186/s13015-016-0087-3', '10.1016/j.neuroimage.2016.10.030', '10.1371/journal.pone.0169109', '10.1007/s11367-017-1358-z', '10.1080/1369183X.2015.1061425', '10.2196/mental.4614', '10.1002/arp.1564', '10.1021/acs.orglett.6b01023', '10.3847/1538-4357/aa6c47', 'http://www.socialevraagstukken.nl/veiligheid-creeer-je-met-geborgenheid/', '10.1186/s12888-016-0790-0', '10.1371/journal.pone.0155755']
            #>>> Enter parsing parameters that do not match the contents of the CSV file
            #>>> Error is not invoked anymore as another error from CSV_File takes over. Kept for possible future use
#>>> my_list_data = ListData()
#>>> try:
#... my_list_data.import_csv_file('test_data//one_column_data.txt',
#... column_delimiter_pattern_in_input_file='\\n',
#... line_head_pattern_to_remove='',
#... line_tail_pattern_to_remove='')
#... except Exception as error_message:
#... print('Exception caught: ' + str(error_message))
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
Exception caught: No data imported from CSV file "test_data//one_column_data.csv". Parsing parameters provided does not seem to match formatting of the inputted CSV file.
"""
from preprocessor.csv_tools import CSV_File
csv_file = CSV_File(input_file_path,
column_delimiter_pattern_in_input_file=column_delimiter_pattern_in_input_file)
csv_file.set_parsing_and_cleaning_parameters(line_head_pattern_to_remove=line_head_pattern_to_remove,
line_tail_pattern_to_remove=line_tail_pattern_to_remove,
cell_head_and_tail_characters_to_remove=cell_head_and_tail_characters_to_remove)
with open(csv_file.input_file_path, encoding='utf8') as input_file:
for i, each_line in enumerate(input_file):
csv_line = csv_file.get_line_at_position_from_file(i + 1)
csv_row = csv_file.clean_and_parse_line_to_CSV_Row_using_cleaning_parameters(csv_line)
instance.append_row(csv_row)
if instance.dataset: # if not empty
print('\nCSV file "%s" is imported as ListData object.' % csv_file.input_file_path)
else:
raise ValueError('No data imported from CSV file "%s". Parsing parameters provided does not seem to match '
'formatting of the inputted CSV file.' % csv_file.input_file_path)
def import_json_object(instance, json_object):
"""
Converts a JSON formatted object to a ListData object.
Args:
            json_object(dict): a dictionary that is formatted as JSON
        Returns:
            Nothing; modifies instance.dataset in place.
Examples:
>>> my_json_object = {
... 1: {'label': 'Example', 'value': 3},
... 2: {'label': 'Test', 'value': 1},
... 3: {'label': 'Tryout'}
... }
>>> print(my_json_object)
{1: {'label': 'Example', 'value': 3}, 2: {'label': 'Test', 'value': 1}, 3: {'label': 'Tryout'}}
>>> my_list_data = ListData()
>>> my_list_data.import_json_object(my_json_object)
>>> print(my_list_data.dataset)
[['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]
"""
from preprocessor.legacy_functions.get_header_index import get_header_index
# iterate through all entries and their ids in the input Bibliography
# (this first iteration is only for indexing all headers in the instance.headers_row. all headers must be
# indexed first before starting to add data rows, because adding header names on-the-go would create data rows
# of differing lengths)
# instance.headers should be empty (and thus, should give an error if attempted to be indexed)
try:
# if instance.headers is not empty (and thus, does not give an index error) raise exception
if instance.headers():
raise Exception('Instance.headers not empty prior to append operation. This method is not compatible '
'with adding new headers/columns.')
# if there an index error, this indicates that the instance.headers() is indeed empty (and therefore cannot
# be indexed).
except IndexError:
headers_list = []
for each_entry_id, each_entry_data in json_object.items():
# add each field name in the input Bibliography to instance.headers_row
for each_field_name in each_entry_data.keys():
if each_field_name not in headers_list:
# add to headers row
headers_list.append(each_field_name)
# add the now fully populated instance.headers_row as the first row of the full dataset (instance.dataset)
instance.dataset.append(headers_list)
# iterate (once again) through all entries and their ids in the input Bibliography
# (this second iteration is for adding data rows)
for each_entry_id, each_entry_data in json_object.items():
# add a blank list to represent a new row per each entry in inputted Bibliography object.
instance.dataset.append([])
# select the last added row
current_row = instance.dataset[-1]
# make this last added row (i.e., each row) as long as the header row
while len(current_row) < len(instance.headers()):
current_row.append(instance.missing_data_character)
# for each field_name-field_value pair in the input Bibliography
for each_field_name, each_field_value in each_entry_data.items():
# extract the index number of the field name's representation in the headers row
current_field_name_header_index = get_header_index(each_field_name, instance.dataset)
current_row[current_field_name_header_index] = each_field_value
def import_bibliography_object(instance, bibliography_object):
"""
Converts a Bibliography class object to a ListData object.
        Returns:
            Nothing; modifies instance.dataset in place.
Examples:
>>> from triplicator.bibTools import Bibliography
>>> my_bibliography = Bibliography()
>>> my_bibliography.setEntry('01', 'author', 'John Doe')
>>> my_bibliography.setEntry('02', 'author', 'Jane Doe')
>>> #my_bibliography.import_data('..//triplicator//example_data//test.bib')
>>> print(my_bibliography.entries)
{'01': {'author': 'John Doe'}, '02': {'author': 'Jane Doe'}}
>>> my_list_data = ListData()
>>> my_list_data.import_bibliography_object(my_bibliography)
>>> print(my_list_data.dataset)
[['author'], ['John Doe'], ['Jane Doe']]
"""
instance.import_json_object(bibliography_object.entries)
def get_column_at_index(instance, index):
'''
Allows columns to be selected (i.e., returned) by entering their index position.
:return: A list vector that contains values from the queried column
:example:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_column_at_index(1)
['birth_date', 2084, 2054]
'''
#############################################################################################################
# assign the column matching the current_index to a variable
column = [each_row[index] for each_row in instance.dataset]
return column
def get_row_length(instance):
"""
Gets the length of a sample row from the dataset.
Returns:
Integer
Examples:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_row_length()
2
"""
probe_index = 0
row_length = 0
try:
row_length = len(instance.dataset[probe_index])
except IndexError:
            raise IndexError('Not possible to probe row at index %s. Nothing found at this index position.' % probe_index)
return row_length
def transpose_dataset(instance):
"""
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'john', 'jane'], ['birth_date', 2084, 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset == my_listdata.transpose_dataset().transpose_dataset().dataset
True
"""
row_length = instance.get_row_length()
columns = [instance.get_column_at_index(i) for i in range(0, row_length)]
instance.dataset = columns
return instance
def merge_all_rows_to_one(instance, value_separator_pattern=' | '):
"""
>>> my_listdata = ListData().append_row(['john', 2054]).append_row(['john', 3254])
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254']
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['john', 2054], ['john', 3254], ['john', 2672]]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254 | 2672']
# method does not deal with headers
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.merge_all_rows_to_one().dataset
['name | john', 'birth_date | 2084 | 2054']
# but headers can be easily managed
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.dataset = my_listdata.dataset[1:]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2084 | 2054']
# different separator pattern (and a transpose-like operation)
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054], ['jane', 2054]]
>>> my_listdata.merge_all_rows_to_one('; ').dataset
['name; john; jane', 'birth_date; 2084; 2054']
>>> type(my_listdata.dataset)
<class 'list'>
>>> from preprocessor.csv_tools import CSV_Line, CSV_Row, Row_Merge_Buffer
>>> line_1 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893" ,')
>>> line_2 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893" ,')
>>> line_1.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893"'
>>> line_2.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893"'
>>> row_1 = line_1.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> row_2 = line_2.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> buffer = Row_Merge_Buffer(1)
>>> buffer.append_as_first_row_and_reset_buffer(row_1)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']]"
>>> buffer.append_row_if_ids_match(row_2)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]"
>>> buffer.merge_all_rows_to_one(' | ')
"https://w3id.org/oc/corpus/br/45174: ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']"
# List conversion with actual rows
>>> a = ListData()
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Row_Merge_Buffer class conversion with actual rows
>>> a = Row_Merge_Buffer(1)
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Error from empty dataset
>>> a = ListData()
>>>
>>> try:
... a.merge_all_rows_to_one(' | ') # no item to index in empty dataset
... except Exception as error_message:
... print('Exception: ' + str(error_message))
Exception: Dataset to be merged is either empty or not indexable (no item at index [0]).
The input dataset is:
[]
"""
try:
instance.dataset[0]
except IndexError:
raise IndexError('Dataset to be merged is either empty or not indexable (no item at index [0]).\nThe input dataset is:\n%s' % str(instance.dataset))
dataset = instance.dataset
# initiate merged_row with the first row of the dataset
merged_row = dataset[0]
for each_row in dataset:
current_row = each_row
current_cell_position = 0
for each_current_cell, each_merged_cell in zip(current_row, merged_row):
if str(each_current_cell) not in str(each_merged_cell): # str conversion needed for 'in' comparison
merged_cell = str(each_merged_cell) + value_separator_pattern + str(each_current_cell)
merged_row[current_cell_position] = merged_cell
current_cell_position += 1
# no need to specify an else scenario, as if compared cells are the same, merged row can stay as is
instance.dataset = merged_row
return instance
def append_row(instance, new_row):
"""
        Appends a row to the ListData object's dataset variable.
Returns:
ListData object (instance)
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.append_row(['a','b','c']).dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.append_row(['x', 'y']).append_row(['z', 't']).append_row(['m', 'n']).dataset
[[1, 2, 3], ['a', 'b', 'c'], ['x', 'y'], ['z', 't'], ['m', 'n']]
"""
instance.dataset.append(new_row)
return instance
def clear_all(instance):
"""
Resets ListData object's dataset variable to its empty state.
Returns:
ListData object
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.clear_all().dataset
[]
>>> my_listdata.dataset
[]
"""
instance.dataset = []
return instance
def append_column(instance, new_column_values, new_column_name):
"""
        :param new_column_values: A list of values that will form the new column
        :param new_column_name: Header name for the new column
        :return: Nothing; modifies instance.dataset in place (no need to assign the output to a variable).
        :usage: append_column(NEW_COLUMN_VARIABLES_LIST, NEW_COLUMN_NAME_STRING)
:example:
>>> my_list_data = ListData()
>>> my_list_data.dataset = [['day', 'month'], [1, 'June'], [3, 'May'], [4, 'Jun']]
>>> years_column = [2149, 2150, 2151]
>>> my_list_data.append_column(years_column, "year")
>>> print(my_list_data.dataset) # changes the original data set without a need to assign the output to a new variable, etc.
[['day', 'month', 'year'], [1, 'June', 2149], [3, 'May', 2150], [4, 'Jun', 2151]]
"""
#############################################################################################################
# Check for duplicate header names
        if new_column_name in instance.headers():  # without this check, the duplicate header gets silently appended to the column values, so duplicate headers are simply not allowed
print(
"ERROR: Header name already in dataset. Re-run all code up to this point or change header name.\nError "
"occured while processing new_column_name: " + str(
new_column_name))
raise ValueError(
"Header name already in dataset. Please choose a different name. If name is correct, try re-running "
"all code up to this point. (See console output for last header name processed.)")
if len(new_column_values) != len(instance.data_rows()):
raise Exception("Inputted column length must be equal to instance.dataset column length.\n" +
'new_column_values length: ' + str(len(new_column_values)) + '\n' +
'instance.data_rows() length: ' + str(len(instance.data_rows()))
)
# Append the inputted column to specified dataset
# pass argument to variable
new_column = new_column_values
# new column = merging of column name and column values
new_column.insert(0, new_column_name)
# for each row in the dataset, append the new column at the end
for i, row in enumerate(instance.dataset):
instance.dataset[i].append(new_column[i])
def remove_column(instance, target_column_header):
"""
Removes a column from dataset.
Args:
target_column_header(str): Name of the column to be removed.
Returns:
Nothing; modifies dataset.
Examples:
>>> example_data = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'],
... ['4', 'Jun', '15.00']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]
>>> my_list_data.remove_column('hour')
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
"""
#############################################################################################################
# the column header also needs to be included in removal process
from preprocessor.legacy_functions.get_header_index import get_header_index
target_index = get_header_index(target_column_header, instance.dataset)
for i, row in enumerate(instance.dataset):
del (instance.dataset[i][target_index])
def remove_columns(instance, target_column_headers_list):
"""
Removes multiple columns from dataset. Is a variation of .remove_column() method to support efficient removal
of multiple columns.
Args:
target_column_headers_list(list): A list of strings whose items are the header names of columns to
be removed
Returns:
Nothing; modifies dataset.
"""
if type(target_column_headers_list) == list:
pass
else:
raise Exception('The argument "target_column_headers_list" must be of "list" type.')
for each_column_header in target_column_headers_list:
instance.remove_column(each_column_header)
def replace_headers(instance, header_replacements_list):
"""
Replaces headers of a dataset.
Args:
header_replacements_list(list): A list of strings to replace headers
Returns:
Nothing; modifies the provided dataset.
Examples:
>>> example_data = [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data.replace_headers(['d', 'm'])
>>> print(my_list_data.dataset)
[['d', 'm'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
"""
# number of headers inputted should match the number of headers in the dataset
if len(header_replacements_list) == len(instance.headers()):
pass
else:
raise Exception('header_replacements_list should be the same length with instance.headers()' + '\n' +
'header_replacements_list length: ' + str(len(header_replacements_list)) + '\n' +
'instance.headers() length: ' + str(len(instance.headers()))
)
for i, each_header in enumerate(header_replacements_list):
instance.dataset[0][i] = each_header
class ListBuffer(ListData):
def __init__(self):
ListData.__init__(self)
# states
self.is_empty = True
def append_row(self, new_row):
"""
Overrides the ListData method of the same name to change buffer state to 'not empty' after adding something to
        the buffer.
Args:
new_row(list, bool, str, int): The object to be added as a new row to buffer
Returns:
ListData object (self)
Examples:
# initiate
>>> my_buffer = ListBuffer()
# empty?
>>> my_buffer.is_empty
True
# simple add
>>> a = my_buffer.append_row(['item 1', 'item 2', 'item 3']) # variable assignment is to suppress output
# fluent interface
>>> my_buffer.append_row(['item 4', 'item 5', 'item 6']). \
append_row(['item 7', 'item 8', 'item 9']).dataset
[['item 1', 'item 2', 'item 3'], ['item 4', 'item 5', 'item 6'], ['item 7', 'item 8', 'item 9']]
# empty now?
>>> my_buffer.is_empty
False
"""
ListData.append_row(self, new_row)
self.is_empty = False
return self
def is_each_row_balanced(self, exclude_special_rows_of_syntax=None):
"""
        Checks whether each row in buffer is balanced (i.e., does not have unmatched parentheses, brackets, etc.). Can
        exclude special row types (e.g., comment) from evaluation.
Args:
exclude_special_rows_of_syntax(str): specifies what type of rows to exclude from evaluation
(e.g., comment rows). Uses predefined syntax settings per specified syntax (e.g., 'bibtex').
Keyword Args:
- bibtex (exclude_special_rows_of_syntax): sets evaluation exclusion criteria for bibtex syntax
Returns:
boolean
Examples:
>>> # an unbalanced row is present
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(['a', 'b', 'c']).append_row(['d', 'e', 'f']).dataset
[['a', 'b', 'c'], ['d', 'e', 'f']]
>>> my_buffer.append_row(['g', 'h' , '>'])\
.is_each_row_balanced()
False
>>> # single row from a bib file
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(' year = "2017",')\
.is_each_row_balanced()
True
>>> # bibtex entry start (no exception vs. exception)
>>> my_buffer.append_row('@article{96d9add3e2f44e8abbf030170689bc30,')\
.is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # bibtex comment (no exception vs. exception)
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row('% This is a comment with an unbalanced characters }]>')\
.is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # a full bibtex entry with an unbalanced curly bracket at title field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "{Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex') # error
False
>>> # the same entry with unbalanced curly bracket removed
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
"""
from preprocessor.string_tools import String
buffer = self.dataset
is_balanced_log = []
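        # record the balance check result of every row; a single False makes the whole buffer unbalanced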
for each_row in buffer:
each_row = String(str(each_row))
if not each_row.is_balanced():
# print('row is not balanced: ', each_row)
### EXCLUSIONS FOR BIBTEX ###########################################
if exclude_special_rows_of_syntax == 'bibtex':
# print('special syntax = bibtex recognized')
# forgive these row types
if each_row.is_line_type('bibtex', 'start of entry') \
or each_row.is_line_type('bibtex', 'end of entry') \
or each_row.is_line_type('bibtex', 'comment'):
is_balanced_log.append(True)
# print("01: appended True to log, because the row is unbalanced but it passed exclusion rules", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row)
else:
is_balanced_log.append(False)
######################################################################
else:
is_balanced_log.append(False)
# print("02: appended False to log because row is unbalanced (no exclusion keyword specified) ", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row)
else:
is_balanced_log.append(True)
# print("03: appended True to log because row is balanced ", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row)
if False in is_balanced_log:
return False
else:
return True
def is_parsable(self, syntax_to_parse_by='bibtex'):
"""
Args:
syntax_to_parse_by:
Returns:
boolean
Examples:
# bibtex entry with no issues
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
True
# unmatched " in author field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. "Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
False
"""
if syntax_to_parse_by == 'bibtex':
# import and shorten bibtex parser function
from pybtex.database.input import bibtex
parser = bibtex.Parser()
# TODO: test can be made in memory instead of via temporary file (could use 'parser.parse_bytes')
with open('temp_buffer_dump.bib', 'w', encoding='utf8') as temp_buffer_dump_file:
for each_buffer_row in self.dataset:
print(each_buffer_row, file=temp_buffer_dump_file)
with open('temp_buffer_dump.bib', encoding='utf8') as temp_buffer_dump_file:
try:
parsed_file = parser.parse_file(temp_buffer_dump_file)
return True
except:
return False
| 54.831737 | 2,984 | 0.582331 | [
"MIT"
] | clokman/KFIR | preprocessor/ListData.py | 40,084 | Python |
"""[HTTPX](https://www.python-httpx.org/) 驱动适配
```bash
nb driver install httpx
# or
pip install nonebot2[httpx]
```
:::tip Tip
This driver only supports client-side HTTP connections
:::
FrontMatter:
sidebar_position: 3
    description: nonebot.drivers.httpx module
"""
from typing import Type, AsyncGenerator
from contextlib import asynccontextmanager
from nonebot.typing import overrides
from nonebot.drivers._block_driver import BlockDriver
from nonebot.drivers import (
Request,
Response,
WebSocket,
HTTPVersion,
ForwardMixin,
ForwardDriver,
combine_driver,
)
try:
import httpx
except ImportError:
raise ImportError(
"Please install httpx by using `pip install nonebot2[httpx]`"
) from None
class Mixin(ForwardMixin):
"""HTTPX Mixin"""
@property
@overrides(ForwardMixin)
def type(self) -> str:
return "httpx"
@overrides(ForwardMixin)
async def request(self, setup: Request) -> Response:
async with httpx.AsyncClient(
http2=setup.version == HTTPVersion.H2,
proxies=setup.proxy,
follow_redirects=True,
) as client:
response = await client.request(
setup.method,
str(setup.url),
content=setup.content,
data=setup.data,
json=setup.json,
files=setup.files,
headers=tuple(setup.headers.items()),
timeout=setup.timeout,
)
return Response(
response.status_code,
headers=response.headers,
content=response.content,
request=setup,
)
@overrides(ForwardMixin)
@asynccontextmanager
async def websocket(self, setup: Request) -> AsyncGenerator[WebSocket, None]:
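        # httpx itself has no WebSocket client, so WebSocket handling is delegated to the parent mixin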
async with super(Mixin, self).websocket(setup) as ws:
yield ws
Driver: Type[ForwardDriver] = combine_driver(BlockDriver, Mixin) # type: ignore
"""HTTPX Driver"""
| 24.580247 | 81 | 0.616273 | [
"MIT"
] | A-kirami/nonebot2 | nonebot/drivers/httpx.py | 2,033 | Python |
from flask import render_template, request, redirect, send_from_directory, jsonify, Blueprint
from direct_answers import choose_direct_answer
from direct_answers import search_result_features
import indieweb_utils
import search_helpers, config, search_page_feeds
import requests
import json
import math
import spacy
import mf2py
main = Blueprint("main", __name__, static_folder="static", static_url_path="")
nlp = spacy.load('en_core_web_sm')
@main.route("/")
def home():
q = request.args.get("q")
return render_template("search/submit.html", title="IndieWeb Search", query=q)
@main.route("/autocomplete")
def search_autocomplete():
query = request.args.get("q")
suggest = requests.get("https://es-indieweb-search.jamesg.blog/suggest?q={}&pw={}".format(query, config.ELASTICSEARCH_PASSWORD))
return jsonify(suggest.json()), 200
@main.route("/results", methods=["GET", "POST"])
def results_page():
page = request.args.get("page")
site = request.args.get("site")
if site and site == "jamesg.blog":
# used for special jamesg.blog search redirect, not for open use
site = "".join([x for x in site if x.isalpha() or x == "."])
return redirect('/results?query=site:"{}"%20{}'.format(site, request.args.get("query")))
special_result = False
if not request.args.get("query"):
return redirect("/")
query_with_handled_spaces = request.args.get("query").replace("--", "").replace(" ", " ").strip()
allowed_chars = [" ", '"', ":", "-", "/", ".", "=", ","]
cleaned_value_for_query = ''.join(e for e in query_with_handled_spaces if e.isalnum() or e in allowed_chars).strip()
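    # extract advanced search operators (e.g. site:, category:, mf2prop:) from the query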
query_values_in_list, query_with_handled_spaces = search_helpers.handle_advanced_search(query_with_handled_spaces)
if cleaned_value_for_query.startswith("xray https://") or cleaned_value_for_query.startswith("xray http://"):
return redirect("https://xray.p3k.io/parse?url={}".format(cleaned_value_for_query.replace("xray ", "")))
session = requests.Session()
if cleaned_value_for_query == "random":
random_site = session.get("https://es-indieweb-search.jamesg.blog/random?pw={}".format(config.ELASTICSEARCH_PASSWORD)).json()["domain"]
return redirect("https://{}/".format(random_site))
if not request.args.get("query"):
return redirect("/")
full_query_with_full_stops = ''.join(e for e in query_with_handled_spaces if e.isalnum() or e == " " or e == ".")
if len(cleaned_value_for_query) == 0:
return redirect("/")
do_i_use = ""
pagination = "0"
if page:
# If page cannot be converted into an integer, redirect to homepage
try:
if int(page) > 1:
pagination = (int(page) - 1) * 10
except:
return redirect("/")
else:
page = 1
order = "score"
minimal = "false"
if request.args.get("order") == "date_asc":
order = "date_asc"
elif request.args.get("order") == "date_desc":
order = "date_desc"
cleaned_value_for_query = cleaned_value_for_query.replace("what is", "")
if request.args.get("format") and (request.args.get("format") == "json_feed" or request.args.get("format") == "jf2"):
minimal = "true"
query_params = ""
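    # build the extra query string parameters for the Elasticsearch backend from the advanced search operators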
if query_values_in_list.get("site"):
query_params += "&site={}".format(query_values_in_list.get("site").replace("%", ""))
if request.args.get("query").startswith("discover"):
query_params += "&discover=true"
if "js:none" in request.args.get("query"):
query_params += "&js=false"
if query_values_in_list.get("category"):
query_params += "&category={}".format(query_values_in_list.get("category"))
if query_values_in_list.get("mf2prop"):
query_params += "&mf2_property={}".format(query_values_in_list.get("mf2prop"))
rows = session.get("https://es-indieweb-search.jamesg.blog/?pw={}&q={}&sort={}&from={}&minimal={}{}".format(
config.ELASTICSEARCH_PASSWORD,
cleaned_value_for_query.replace("who is", "").replace("code", "").replace("discover ", "").strip(),
order, str(pagination),
minimal,
query_params)
).json()
num_of_results = rows["hits"]["total"]["value"]
rows = rows["hits"]["hits"]
for r in rows:
if r["_source"].get("h_card"):
r["_source"]["h_card"] = json.loads(r["_source"]["h_card"])
else:
r["_source"]["h_card"] = None
cleaned_value = cleaned_value_for_query.lower()
if page == 1:
do_i_use, special_result = choose_direct_answer.choose_featured_snippet(
cleaned_value,
cleaned_value_for_query,
rows,
special_result,
full_query_with_full_stops,
session,
nlp
)
if len(rows) == 0:
out_of_bounds_page = True
final_query = cleaned_value_for_query
# this code doesn't work right now
# identify_mistakes = spell.unknown(cleaned_value.split('"')[-1].split(" "))
# final_query = ""
# suggestion = False
# cleaned_items = cleaned_value.split('"')[-1].split(" ")
# for w in range(0, len(cleaned_items)):
# if cleaned_items[w] in identify_mistakes and cleaned_items[w] != "":
# final_query += spell.correction(cleaned_items[w]) + " "
# suggestion = True
# final_query = " " + final_query
# else:
# final_query += cleaned_items[w] + " "
# final_query = "".join(cleaned_value.split('"')[:-1]) + '" ' + final_query
else:
out_of_bounds_page = False
suggestion = False
final_query = ""
if "random aeropress" in cleaned_value or "generate aeropress" in cleaned_value and request.args.get("type") != "image":
special_result = search_result_features.aeropress_recipe()
format = request.args.get("format")
if format == "json_feed":
json_feed = search_page_feeds.process_json_feed(rows, cleaned_value, page, format)
return json_feed
elif format == "jf2":
jf2_feed = search_page_feeds.process_jf2_feed(rows)
return jf2_feed
elif format == "rss":
rss_feed = search_page_feeds.process_rss_feed(rows, cleaned_value, page, format)
return rss_feed
elif format == "direct_serp_json":
if special_result:
return jsonify({"text": do_i_use, "featured_serp": special_result})
else:
return jsonify({"message": "no custom serp available on this search"})
elif format == "results_page_json":
return jsonify({"results": [r["_source"] for r in rows]})
# show one result if a featured snippet is available, even if there are no other results to show
if not special_result and not do_i_use and int(num_of_results) == 0:
num_of_results = 0
out_of_bounds_page = True
else:
out_of_bounds_page = False
return render_template("search/results.html",
results=rows,
number_of_results=int(num_of_results),
page=int(page),
page_count=int(math.ceil(num_of_results / 10)),
query=cleaned_value,
results_type=request.args.get("type"),
out_of_bounds_page=out_of_bounds_page,
ordered_by=request.args.get("order"),
base_results_query="/results?query=" + cleaned_value_for_query,
corrected_text=final_query,
suggestion_made=suggestion,
special_result=special_result,
do_i_use=do_i_use,
title="Search results for '{}' query".format(cleaned_value)
)
@main.route("/robots.txt")
def robots():
return send_from_directory(main.static_folder, "robots.txt")
@main.route('/assets/<path:path>')
def send_static_images(path):
return send_from_directory("static/", path)
@main.route("/changelog")
def changelog():
return render_template("changelog.html", title="IndieWeb Search Changelog")
@main.route("/advanced")
def advanced_search():
return render_template(
"search/advanced_search.html",
title="IndieWeb Search Advanced Search Options"
)
@main.route("/api/post-type")
def get_original_post_type():
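    """Determine the post type of the h-entry found at the URL given in the ?url= query parameter."""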
page_to_check = request.args.get("url")
mf2_parsed = mf2py.parse(page_to_check)
if not mf2_parsed:
return jsonify({"status": "failed", "result": ""})
if not mf2_parsed["items"]:
return jsonify({"status": "failed", "result": ""})
# get h_entry
h_entry = [i for i in mf2_parsed["items"] if i["type"] == ["h-entry"]]
result = indieweb_utils.get_post_type(h_entry)
return jsonify({"status": "success", "result": result})
@main.route("/api/authorship")
def get_post_author():
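    """Discover the author of the post at the URL given in the ?url= query parameter from its h-entry/h-card markup."""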
page_to_check = request.args.get("url")
mf2_parsed = mf2py.parse(page_to_check)
if not mf2_parsed:
return jsonify({"status": "failed", "message": "No microformats could be found on this page", "author": []})
if not mf2_parsed["items"]:
return jsonify({"status": "failed", "message": "No microformats could be found on this page", "author": []})
# get h_entry
h_entry = [i for i in mf2_parsed["items"] if i["type"] == ["h-entry"]]
h_card = [i for i in mf2_parsed["items"] if i["type"] == ["h-card"]]
if not h_entry and h_card == []:
return jsonify({"status": "failed", "message": "No h-entry could be found on this page", "author": []})
if h_card == []:
        for i in h_entry:
if i['type'] == ['h-entry']:
if i['properties'].get('author'):
# if author is h_card
if type(i['properties']['author'][0]) == dict and i['properties']['author'][0].get('type') == ['h-card']:
h_card = i['properties']['author'][0]
elif type(i['properties']['author']) == list:
h_card = i['properties']['author'][0]
result = indieweb_utils.discover_author(h_card, h_entry, page_to_check, [])
return jsonify({"status": "success", "result": result})
@main.route("/stats")
def stats():
count_request = requests.get("https://es-indieweb-search.jamesg.blog/count").json()
count = count_request["es_count"]["count"]
domains = count_request["domains"]
headers = {
"Authorization": config.ELASTICSEARCH_API_TOKEN
}
feed_breakdown_request = requests.get("https://es-indieweb-search.jamesg.blog/feed_breakdown", headers=headers).json()
special_stats = requests.get("https://es-indieweb-search.jamesg.blog/special_stats", headers=headers).json()
top_linked_assets = special_stats["top_ten_links"]
link_types = special_stats["link_microformat_instances"]
return render_template(
"search/stats.html",
count=count,
domains=domains,
title="IndieWeb Search Index Stats",
feed_breakdown=feed_breakdown_request,
top_linked_assets=top_linked_assets,
link_types=link_types
)
@main.route("/about")
def about():
    return render_template("search/about.html", title="About IndieWeb Search")
| 30.242515 | 137 | 0.701416 | [
"MIT"
] | capjamesg/indieweb-search | main.py | 10,101 | Python |
# 03_xkcd_multithread_download.py
# This exercise is about speeding up the comic downloads
# by using multiple threads for the downloads.
import os, threading, requests, bs4
os.chdir(os.path.dirname(__file__))
target_dir='.\\comics'
source_url='https://xkcd.com'
# Check whether the site is reachable
url_content=requests.get(source_url)
try:
url_content.raise_for_status()
except:
print('URL xkcd.com kann nicht aufgerufen werden. Script wird beendet.')
exit()
# Download the comics in a thread
def download_comic(comic_url):
file_name=comic_url.split('/')[-1]
new_file=open(target_dir+'\\'+file_name, 'wb')
get_comic=requests.get(comic_url)
try:
get_comic.raise_for_status()
for chunk in get_comic.iter_content(10**6):
new_file.write(chunk)
new_file.close()
except:
        print('Bild-URL %s ist fehlerhaft' % comic_url)
# Collect the links to the comics and to the pages that follow
link_counter=0
threads=[]
def scrape_comic_links(url_name):
global link_counter, threads
while link_counter != int(comic_target_amount):
url_content=requests.get(url_name)
try:
url_content.raise_for_status()
bs4_object=bs4.BeautifulSoup(url_content.text, features='html.parser')
bs4_next_result=bs4_object.select('a[rel="prev"]')
next_url=bs4_next_result[0].get('href')
bs4_comic_result=bs4_object.select('div #comic img')
comic_url=bs4_comic_result[0].get('src')
comic_url='https://'+comic_url.lstrip('/')
url_name=source_url+next_url
link_counter+=1
            # Start the download thread
thread_object=threading.Thread(name='Download_Comic', target=download_comic, args=[comic_url])
thread_object.start()
            # Add this thread to a list so we can check later whether everything has been processed.
threads.append(thread_object)
except:
print('URL nicht gefunden.')
return
else:
link_counter=0
return
while True:
print('Wieviele Comics sollen heruntergeladen werden?')
comic_target_amount=input()
if comic_target_amount.isdecimal():
scrape_comic_links(source_url)
        # Wait until all download threads have finished.
for thread in threads:
thread.join()
print('Downloads abgeschlossen')
break
| 34.507042 | 106 | 0.672653 | [
"MIT"
] | Apop85/Scripts | Python/Buch_ATBS/Teil_2/Kapitel_15_Aufgaben_zeitlich_Planen_und_Programme_starten/03_xkcd_multithread_download/03_xkcd_multithread_download.py | 2,456 | Python |
import os
from setuptools import setup
from ec2audit import __version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='ec2audit',
version=__version__,
description='Dump all EC2 information to a folder suitable for version control',
long_description=read('README.rst'),
url='http://github.com/RisingOak/ec2audit',
author='Cosmin Stejerean',
author_email='[email protected]',
license='Apache License 2.0',
packages=['ec2audit'],
scripts=['bin/ec2audit'],
tests_require=open('test-requirements.txt').readlines(),
install_requires=open('requirements.txt').readlines(),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Topic :: Utilities'
]
)
| 31.928571 | 86 | 0.6566 | [
"Apache-2.0"
] | cosmin/ec2audit | setup.py | 894 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List the versions within a key."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kms import flags
class List(base.ListCommand):
r"""List the versions within a key.
Lists all of the versions within the given key.
## EXAMPLES
The following command lists all versions within the
key `frodo`, keyring `fellowship`, and location `global`:
$ {command} --location global \
--keyring fellowship \
--key frodo
"""
@staticmethod
def Args(parser):
flags.AddKeyResourceFlags(parser)
parser.display_info.AddFormat('table(name, state)')
def Run(self, args):
# pylint: disable=line-too-long
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
crypto_key_ref = flags.ParseCryptoKeyName(args)
request = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListRequest(
parent=crypto_key_ref.RelativeName())
return list_pager.YieldFromList(
client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions,
request,
field='cryptoKeyVersions',
limit=args.limit,
batch_size_attribute='pageSize')
| 32.15873 | 95 | 0.748766 | [
"Apache-2.0"
] | bshaffer/google-cloud-sdk | lib/surface/kms/keys/versions/list.py | 2,026 | Python |
import networkx as nx
from operator import add
with open("input.txt","r") as f:
char_map=[list(l.strip('\n')) for l in f.readlines()]
top_labels=char_map[:2]
top_labels=list(map(add,top_labels[0],top_labels[1]))
bottom_labels=char_map[-2:]
bottom_labels=list(map(add,bottom_labels[0],bottom_labels[1]))
char_map=char_map[2:-2]
for i,c in enumerate(char_map[0]):
if c=='.':
char_map[0][i]=top_labels[i]
for i,c in enumerate(char_map[-1]):
if c=='.':
char_map[-1][i]=bottom_labels[i]
left_labels=[]
right_labels=[]
for i in range(len(char_map)):
right_labels.append((char_map[i].pop()+char_map[i].pop())[::-1])
left_labels.append(char_map[i].pop(0)+char_map[i].pop(0))
for i in range(len(char_map)):
if char_map[i][0]=='.':
char_map[i][0]=left_labels[i]
if char_map[i][-1]=='.':
char_map[i][-1]=right_labels[i]
inner_begin_i=next( (i for i,x in enumerate(char_map) if ' ' in x))
inner_end_i=len(char_map)-next( (i for i,x in enumerate(reversed(char_map)) if ' ' in x))-1
inner_begin_j=char_map[inner_begin_i].index(' ')
inner_end_j=len(char_map[inner_end_i])-list(reversed(char_map[inner_end_i])).index(' ')-1
inner_top_labels=[]
inner_bottom_labels=[]
inner_left_labels=[]
inner_right_labels=[]
for i in range(inner_begin_i,inner_end_i):
inner_left_labels.append(char_map[i][inner_begin_j]+char_map[i][inner_begin_j+1])
inner_right_labels.append(char_map[i][inner_end_j-1]+char_map[i][inner_end_j])
for j in range(inner_begin_j,inner_end_j):
inner_top_labels.append(char_map[inner_begin_i][j]+char_map[inner_begin_i+1][j])
inner_bottom_labels.append(char_map[inner_end_i-1][j]+char_map[inner_end_i][j])
for i in range(inner_begin_i,inner_end_i):
if char_map[i][inner_begin_j-1]=='.':
char_map[i][inner_begin_j-1]=inner_left_labels[i-inner_begin_i]
if char_map[i][inner_end_j+1]=='.':
char_map[i][inner_end_j+1]=inner_right_labels[i-inner_begin_i]
for j in range(inner_begin_j,inner_end_j):
if char_map[inner_begin_i-1][j]=='.':
char_map[inner_begin_i-1][j]=inner_top_labels[j-inner_begin_j]
if char_map[inner_end_i+1][j]=='.':
char_map[inner_end_i+1][j]=inner_bottom_labels[j-inner_begin_j]
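# build a graph of walkable tiles; tiles that carry the same portal label are connected with an extra edge further down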
g=nx.Graph()
labels=dict()
for i in range(len(char_map)):
for j in range(len(char_map[i])):
if char_map[i][j]=='.':
if char_map[i-1][j]!='#':#up
g.add_edge((i,j),(i-1,j))
if char_map[i+1][j]!='#':#down
g.add_edge((i,j),(i+1,j))
if char_map[i][j+1]!='#':#left
g.add_edge((i,j),(i,j+1))
if char_map[i][j-1]!='#':#right
g.add_edge((i,j),(i,j-1))
elif len(char_map[i][j])>1:#label
if char_map[i][j] not in labels:
labels[char_map[i][j]]=[(i,j)]
else:
labels[char_map[i][j]].append((i,j))
for v in labels.values():
if len(v)==2:
g.add_edge(v[0],v[1])
print(nx.shortest_path_length(g,labels["AA"][0],labels["ZZ"][0]))
| 37.121951 | 91 | 0.640604 | [
"MIT"
] | Seralpa/Advent-of-code-2019 | day20/day20_1.py | 3,044 | Python |
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
class ConnectionDatabase(Entity, IocManager.Base):
__tablename__ = "ConnectionDatabase"
__table_args__ = {"schema": "Connection"}
ConnectionId = Column(Integer, ForeignKey('Connection.Connection.Id'))
ConnectorTypeId = Column(Integer, ForeignKey('Connection.ConnectorType.Id'))
Sid = Column(String(100), index=False, unique=False, nullable=True)
ServiceName = Column(String(100), index=False, unique=False, nullable=True)
DatabaseName = Column(String(100), index=False, unique=False, nullable=True)
ConnectorType = relationship("ConnectorType", back_populates="Databases")
def __init__(self,
ConnectionId: int = None,
ConnectorTypeId: int = None,
Host: str = None,
Port: int = None,
Sid: str = None,
ServiceName: str = None,
DatabaseName: str = None,
Connection = None,
ConnectorType = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.ConnectionId: str = ConnectionId
self.ConnectorTypeId: str = ConnectorTypeId
self.Host: str = Host
self.Port: int = Port
self.Sid: str = Sid
self.ServiceName: str = ServiceName
self.DatabaseName: str = DatabaseName
self.Connection = Connection
        self.ConnectorType = ConnectorType
| 42.864865 | 80 | 0.639344 | [
"MIT"
] | PythonDataIntegrator/pythondataintegrator | src/api/models/dao/connection/ConnectionDatabase.py | 1,586 | Python |
# Create your views here.
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import gettext as _
from openbook_common.responses import ApiMessageResponse
from openbook_common.serializers import CommonFollowRequestSerializer
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.utils.helpers import normalise_request_data
from openbook_follows.serializers import FollowUserRequestSerializer, FollowSerializer, \
DeleteFollowSerializer, UpdateFollowSerializer, FollowUserSerializer, RequestToFollowUserSerializer, \
ApproveUserFollowRequestSerializer, RejectUserFollowRequestSerializer, ReceivedFollowRequestsRequestSerializer
class ReceivedFollowRequests(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def get(self, request):
query_params = request.query_params.dict()
user = request.user
serializer = ReceivedFollowRequestsRequestSerializer(data=query_params)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
max_id = data.get('max_id')
count = data.get('count', 10)
received_follow_requests = user.get_received_follow_requests(max_id=max_id).order_by(
'-id')[:count]
response_serializer = CommonFollowRequestSerializer(received_follow_requests, many=True,
context={'request': request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class RequestToFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def put(self, request):
serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_request_to_follow_username = data.get('username')
user = request.user
with transaction.atomic():
follow_request = user.create_follow_request_for_user_with_username(user_to_request_to_follow_username)
response_serializer = CommonFollowRequestSerializer(follow_request, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class CancelRequestToFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_cancel_request_for = data.get('username')
user = request.user
with transaction.atomic():
user.delete_follow_request_for_user_with_username(user_to_cancel_request_for)
return ApiMessageResponse(_('Follow request cancelled.'), status=status.HTTP_200_OK)
class ApproveUserFollowRequest(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = ApproveUserFollowRequestSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_approve_follow_request_from_username = data.get('username')
user = request.user
with transaction.atomic():
user.approve_follow_request_from_user_with_username(
user_username=user_to_approve_follow_request_from_username)
return ApiMessageResponse(_('Follow request approved.'), status=status.HTTP_200_OK)
class RejectUserFollowRequest(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = RejectUserFollowRequestSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_reject_follow_request_from_username = data.get('username')
user = request.user
with transaction.atomic():
user.reject_follow_request_from_user_with_username(
user_username=user_to_reject_follow_request_from_username)
return ApiMessageResponse(_('Follow request rejected.'), status=status.HTTP_200_OK)
class FollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
serializer = FollowUserRequestSerializer(data=request_data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
lists_ids = data.get('lists_ids')
user_to_follow_username = data.get('username')
user = request.user
User = get_user_model()
user_to_follow = User.objects.get(username=user_to_follow_username)
with transaction.atomic():
follow = user.follow_user_with_id(user_to_follow.pk, lists_ids=lists_ids)
response_serializer = FollowSerializer(follow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class UnfollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
user = request.user
serializer = DeleteFollowSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_unfollow_username = data.get('username')
User = get_user_model()
user_to_unfollow = User.objects.get(username=user_to_unfollow_username)
with transaction.atomic():
user.unfollow_user_with_id(user_to_unfollow.pk)
response_serializer = FollowUserSerializer(user_to_unfollow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class UpdateFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
user = request.user
serializer = UpdateFollowSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
lists_ids = data.get('lists_ids')
followed_user_username = data.get('username')
User = get_user_model()
followed_user = User.objects.get(username=followed_user_username)
with transaction.atomic():
follow = user.update_follow_for_user_with_id(followed_user.pk, lists_ids=lists_ids)
response_serializer = FollowSerializer(follow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
def _prepare_request_data_for_validation(request_data):
request_data_copy = normalise_request_data(request_data)
lists_ids = request_data_copy.get('lists_ids', None)
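    # lists_ids may arrive as a single comma separated string (e.g. from form
    # encoded requests), so normalise it into a list of ids before validation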
if isinstance(lists_ids, str):
lists_ids = lists_ids.split(',')
request_data_copy['lists_ids'] = lists_ids
return request_data_copy
| 37.543147 | 114 | 0.741211 | [
"MIT"
] | OkunaOrg/okuna-api | openbook_follows/views.py | 7,396 | Python |
"""
本文件用以练习 manim 的各种常用对象
SVGMobject
ImageMobject
TextMobject
TexMobeject
Text
参考资料: https://www.bilibili.com/video/BV1CC4y1H7kp
XiaoCY 2020-11-27
"""
#%% Initialization
from manimlib.imports import *
"""
素材文件夹介绍
在 manim 中使用各种素材时可以使用绝对路径声明素材。
为了简单,可以创建 assets 文件夹并放置在 manim 路径下。
如此做,使用素材时可以不加路径。
assets/raster_images/ 放置 png 等格式的位图
assets/svg_images/ 放置 svg 格式的矢量图
assets/sounds/ 一般不用,也可以不创建
"""
#%% SVGMobject
"""
在 manim 中使用 SVG 图片可以直接使用 SVGMobject,
传入的第一个参数是指向 SVG 文件的字符串,
关键字参数包括 VMobject 的共有属性,有
填充样式
填充颜色 fill_color 或 color
不透明度 fill_opacity
线条样式
线条颜色 stroke_color 或 color
线条宽度 stroke_width
线条不透明度 stroke_opacity
背景线条样式
背景线条颜色 background_stroke_color 或 color
背景线条宽度 background_stroke_width
背景线条不透明度 background_stroke_opacity
光泽样式
光泽尺度 sheen_factor
光泽方向 sheen_direction
"""
class try_SVGMob(Scene): # create a scene by defining a class; the name is up to you
    def construct(self): # treat the class/def pair as fixed boilerplate for now
        # build the SVGMobject --- add an SVG image
mob = SVGMobject(
"coin.svg",
            color = BLUE, # manim has some built-in colors, see https://manim.ml/constants.html#id7
stroke_width = 1.00
)
        # SVGMobject can use all of the animations available to a VMobject;
        # animations are not covered in detail in this exercise, this is only a demo
self.play(FadeInFromLarge(mob))
self.wait(2)
#%% ImageMobject
"""
与 SVGMobject 相像,插入位图时可使用 ImageMobject,
传入的第一个参数是字符串表示的位图路径,
关键字参数仅有以下部分
图片高度 height (默认为2)
是否反色 invert (默认 False)
"""
class try_ImageMob(Scene):
def construct(self):
        # add the bitmap
mob = ImageMobject(
'smirk.png',
height = 3
)
        # ImageMobject is not a subclass of VMobject, so many animations cannot be used,
        # but animations are still not discussed in depth here
self.play(FadeInFromLarge(mob))
self.wait(2)
#%% TextMobject
"""
TextMobject 会将字符串作为 LaTeX 的普通字符进行编译
传入的第一个参数为需要添加的字符串,其可以使用 LaTeX 表达式
由于 LaTeX 表达式中常含有反斜线,构造字符串时需要采用双反斜线
或在字符串前添加 r 以消除歧义
TextMobject 是 VMobject,其他属性同 SVGMobject
一个 TextMobject 中也可以传入多个字符串,会单独编译但连在一起显示
这时可以利用索引来访问各个字符串
其他可选参数
arg_separator 传入多个字符串时,设置字符串之间的字符,默认为空格
tex_to_color_map 为一个字典,根据键值自动拆分字符串进行上色
"""
class try_TextMob(Scene):
def construct(self):
        # define a string (array) and access its parts by index
text = TextMobject(
"早安, \\TeX!",
r"你好,\LaTeX!",
tex_to_color_map = {"\\LaTeX": RED_B}
)
self.play(Write(text[0]))
self.wait(0.5)
        # note that text[2] and text[3] are used below; they did not exist originally,
        # but tex_to_color_map split the string automatically in order to color it
# text[0] = r"早安, \TeX!"
# text[1] = "你好,"
# text[2] = r"\LaTeX"
# text[3] = "!"
self.play(Transform(text[0],text[1:4]))
self.wait(1)
#%% TexMobject
"""
TexMobject effectively provides an align* math environment for typesetting
LaTeX math formulas; it is used in the same way as TextMobject.
For the LaTeX code of a formula, this online editor is handy
(friendly recommendation): https://www.latexlive.com/
"""
class try_TexMob(Scene):
def construct(self):
        text = TextMobject('欧拉公式') # this is the text ("Euler's formula")
        tex = TexMobject( # this is the formula
r'\mathrm{e}^{\mathrm{i}\pi} + 1 = 0',
color = GREEN
)
self.play(FadeIn(text))
self.wait(0.5)
self.play(ReplacementTransform(text,tex))
self.wait(1)
#%% Text
"""
The text of a TextMobject is compiled by LaTeX.
If you only need plain text (no special symbols such as \LaTeX), use Text instead.
The first positional argument is the text string; optional arguments include
    color   the text color
    font    the font
    t2c     a dict mapping characters (or substrings) to colors
"""
class try_Text(Scene):
def construct(self):
text = Text(
"Hello World!",
font = "Adobe Heiti Std",
t2c = {
"H": BLUE,
"W": RED
}
)
self.play(Write(text))
self.wait(1) | 22.412121 | 93 | 0.593564 | [
"MIT"
] | iChunyu/LearnPython | manim/tutorial01_Mobjects.py | 5,248 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_predictor."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tf_api.builders import hyperparams_builder
from tf_api.core import box_predictor
from tf_api.protos import hyperparams_pb2
class MaskRCNNBoxPredictorTest(tf.test.TestCase):
def _build_arg_scope_with_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.build(hyperparams, is_training=True)
def test_get_boxes_with_five_classes(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
)
box_predictions = mask_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
class_predictions_with_background = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
class_predictions_with_background_shape) = sess.run(
[tf.shape(box_encodings),
tf.shape(class_predictions_with_background)])
self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])
self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6])
def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self):
with self.assertRaises(ValueError):
box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
predict_instance_masks=True)
def test_get_instance_masks(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
conv_hyperparams=self._build_arg_scope_with_hyperparams(
op_type=hyperparams_pb2.Hyperparams.CONV),
predict_instance_masks=True)
box_predictions = mask_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
mask_predictions = box_predictions[box_predictor.MASK_PREDICTIONS]
self.assertListEqual([2, 1, 5, 14, 14],
mask_predictions.get_shape().as_list())
def test_do_not_return_instance_masks_and_keypoints_without_request(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4)
box_predictions = mask_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
self.assertEqual(len(box_predictions), 2)
self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
in box_predictions)
def test_value_error_on_predict_keypoints(self):
with self.assertRaises(ValueError):
box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
predict_keypoints=True)
class RfcnBoxPredictorTest(tf.test.TestCase):
def _build_arg_scope_with_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.build(conv_hyperparams, is_training=True)
def test_get_correct_box_encoding_and_class_prediction_shapes(self):
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
proposal_boxes = tf.random_normal([4, 2, 4], dtype=tf.float32)
rfcn_box_predictor = box_predictor.RfcnBoxPredictor(
is_training=False,
num_classes=2,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
num_spatial_bins=[3, 3],
depth=4,
crop_size=[12, 12],
box_code_size=4
)
box_predictions = rfcn_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor',
proposal_boxes=proposal_boxes)
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
class_predictions_with_background = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
class_predictions_shape) = sess.run(
[tf.shape(box_encodings),
tf.shape(class_predictions_with_background)])
self.assertAllEqual(box_encodings_shape, [8, 1, 2, 4])
self.assertAllEqual(class_predictions_shape, [8, 1, 3])
class ConvolutionalBoxPredictorTest(tf.test.TestCase):
def _build_arg_scope_with_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.build(conv_hyperparams, is_training=True)
def test_get_boxes_for_five_aspect_ratios_per_location(self):
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
box_predictions = conv_box_predictor.predict(
image_features, num_predictions_per_location=5, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
objectness_predictions = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)])
self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
self.assertAllEqual(objectness_predictions_shape, [4, 320, 1])
def test_get_boxes_for_one_aspect_ratio_per_location(self):
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
box_predictions = conv_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
objectness_predictions = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)])
self.assertAllEqual(box_encodings_shape, [4, 64, 1, 4])
self.assertAllEqual(objectness_predictions_shape, [4, 64, 1])
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
self):
num_classes_without_background = 6
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
box_predictions = conv_box_predictor.predict(
image_features,
num_predictions_per_location=5,
scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
class_predictions_with_background = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape, class_predictions_with_background_shape
) = sess.run([
tf.shape(box_encodings), tf.shape(class_predictions_with_background)])
self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
self.assertAllEqual(class_predictions_with_background_shape,
[4, 320, num_classes_without_background+1])
def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(
self):
image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
box_predictions = conv_box_predictor.predict(
image_features, num_predictions_per_location=5, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
objectness_predictions = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
resolution = 32
expected_num_anchors = resolution*resolution*5
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)],
feed_dict={image_features:
np.random.rand(4, resolution, resolution, 64)})
self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
self.assertAllEqual(objectness_predictions_shape,
[4, expected_num_anchors, 1])
if __name__ == '__main__':
tf.test.main()
| 39 | 80 | 0.712172 | [
"MIT"
] | emma-mens/elk-recognition | src/animal_detection/tf_api/core/box_predictor_test.py | 12,636 | Python |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import collections
import copy
import itertools
import os
import pprint
import sys
import types
import warnings
from six import string_types
import archspec.cpu
try:
import clingo
# There may be a better way to detect this
clingo_cffi = hasattr(clingo.Symbol, '_rep')
except ImportError:
clingo = None # type: ignore
clingo_cffi = False
import llnl.util.lang
import llnl.util.tty as tty
import spack
import spack.architecture
import spack.bootstrap
import spack.cmd
import spack.compilers
import spack.config
import spack.dependency
import spack.directives
import spack.environment as ev
import spack.error
import spack.package
import spack.package_prefs
import spack.repo
import spack.spec
import spack.util.timer
import spack.variant
import spack.version
if sys.version_info >= (3, 3):
from collections.abc import Sequence # novm
else:
from collections import Sequence
#: Enumeration like object to mark version provenance
version_provenance = collections.namedtuple( # type: ignore
'VersionProvenance', ['external', 'packages_yaml', 'package_py', 'spec']
)(spec=0, external=1, packages_yaml=2, package_py=3)
#: String representation of version origins, to emit legible
# facts for the ASP solver
version_origin_str = {
0: 'spec',
1: 'external',
2: 'packages_yaml',
3: 'package_py'
}
#: Named tuple to contain information on declared versions
DeclaredVersion = collections.namedtuple(
'DeclaredVersion', ['version', 'idx', 'origin']
)
def issequence(obj):
if isinstance(obj, string_types):
return False
return isinstance(obj, (Sequence, types.GeneratorType))
def listify(args):
if len(args) == 1 and issequence(args[0]):
return list(args[0])
return list(args)
def packagize(pkg):
if isinstance(pkg, string_types):
return spack.repo.path.get_pkg_class(pkg)
else:
return pkg
def specify(spec):
if isinstance(spec, spack.spec.Spec):
return spec
return spack.spec.Spec(spec)
class AspObject(object):
"""Object representing a piece of ASP code."""
def _id(thing):
"""Quote string if needed for it to be a valid identifier."""
if isinstance(thing, AspObject):
return thing
elif isinstance(thing, bool):
return '"%s"' % str(thing)
elif isinstance(thing, int):
return str(thing)
else:
return '"%s"' % str(thing)
@llnl.util.lang.key_ordering
class AspFunction(AspObject):
def __init__(self, name, args=None):
self.name = name
self.args = () if args is None else args
def _cmp_key(self):
return (self.name, self.args)
def __call__(self, *args):
return AspFunction(self.name, args)
def symbol(self, positive=True):
def argify(arg):
if isinstance(arg, bool):
return clingo.String(str(arg))
elif isinstance(arg, int):
return clingo.Number(arg)
else:
return clingo.String(str(arg))
return clingo.Function(
self.name, [argify(arg) for arg in self.args], positive=positive)
def __str__(self):
return "%s(%s)" % (
self.name, ', '.join(str(_id(arg)) for arg in self.args))
def __repr__(self):
return str(self)
class AspFunctionBuilder(object):
def __getattr__(self, name):
return AspFunction(name)
fn = AspFunctionBuilder()
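# Illustrative sketch (assumed, not part of the original module): attribute
# access on ``fn`` builds ASP terms, e.g. str(fn.node("zlib")) renders as
# node("zlib") and str(fn.version("zlib", "1.2.11")) renders as
# version("zlib", "1.2.11") when the driver writes facts out.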
def all_compilers_in_config():
return spack.compilers.all_compilers()
def extend_flag_list(flag_list, new_flags):
"""Extend a list of flags, preserving order and precedence.
Add new_flags at the end of flag_list. If any flags in new_flags are
already in flag_list, they are moved to the end so that they take
higher precedence on the compile line.
"""
for flag in new_flags:
if flag in flag_list:
flag_list.remove(flag)
flag_list.append(flag)
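# Example of the precedence behavior described above (illustrative only):
#   flags = ["-O2", "-g"]
#   extend_flag_list(flags, ["-g", "-fPIC"])
#   # flags is now ["-O2", "-g", "-fPIC"]; the existing "-g" moved to the end
#   # so the newer occurrence takes precedence on the compile line.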
def check_same_flags(flag_dict_1, flag_dict_2):
"""Return True if flag dicts contain the same flags regardless of order."""
types = set(flag_dict_1.keys()).union(set(flag_dict_2.keys()))
for t in types:
values1 = set(flag_dict_1.get(t, []))
values2 = set(flag_dict_2.get(t, []))
assert values1 == values2
def check_packages_exist(specs):
"""Ensure all packages mentioned in specs exist."""
repo = spack.repo.path
for spec in specs:
for s in spec.traverse():
try:
check_passed = repo.exists(s.name) or repo.is_virtual(s.name)
except Exception as e:
msg = 'Cannot find package: {0}'.format(str(e))
check_passed = False
tty.debug(msg)
if not check_passed:
raise spack.repo.UnknownPackageError(str(s.fullname))
class Result(object):
"""Result of an ASP solve."""
def __init__(self, specs, asp=None):
self.asp = asp
self.satisfiable = None
self.optimal = None
self.warnings = None
self.nmodels = 0
# specs ordered by optimization level
self.answers = []
self.cores = []
# names of optimization criteria
self.criteria = []
# Abstract user requests
self.abstract_specs = specs
# Concrete specs
self._concrete_specs = None
def print_cores(self):
for core in self.cores:
tty.msg(
"The following constraints are unsatisfiable:",
*sorted(str(symbol) for symbol in core))
@property
def specs(self):
"""List of concretized specs satisfying the initial
abstract request.
"""
# The specs were already computed, return them
if self._concrete_specs:
return self._concrete_specs
# Assert prerequisite
msg = 'cannot compute specs ["satisfiable" is not True ]'
assert self.satisfiable, msg
self._concrete_specs = []
best = min(self.answers)
opt, _, answer = best
for input_spec in self.abstract_specs:
key = input_spec.name
if input_spec.virtual:
providers = [spec.name for spec in answer.values()
if spec.package.provides(key)]
key = providers[0]
self._concrete_specs.append(answer[key])
return self._concrete_specs
def _normalize_packages_yaml(packages_yaml):
normalized_yaml = copy.copy(packages_yaml)
for pkg_name in packages_yaml:
is_virtual = spack.repo.path.is_virtual(pkg_name)
if pkg_name == 'all' or not is_virtual:
continue
# Remove the virtual entry from the normalized configuration
data = normalized_yaml.pop(pkg_name)
is_buildable = data.get('buildable', True)
if not is_buildable:
for provider in spack.repo.path.providers_for(pkg_name):
entry = normalized_yaml.setdefault(provider.name, {})
entry['buildable'] = False
externals = data.get('externals', [])
keyfn = lambda x: spack.spec.Spec(x['spec']).name
for provider, specs in itertools.groupby(externals, key=keyfn):
entry = normalized_yaml.setdefault(provider, {})
entry.setdefault('externals', []).extend(specs)
return normalized_yaml
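# Illustrative sketch (assumed input/output shapes, not part of the original
# module): a virtual entry such as
#   mpi: {buildable: False, externals: [{spec: "openmpi@4.0.5", prefix: "/usr"}]}
# disappears from the normalized dict; ``buildable: False`` is copied onto
# every provider of mpi, and the external spec is appended to the externals
# list of the package it names (here openmpi), so the solver only ever sees
# concrete (non-virtual) package names.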
class PyclingoDriver(object):
def __init__(self, cores=True, asp=None):
"""Driver for the Python clingo interface.
Arguments:
cores (bool): whether to generate unsatisfiable cores for better
error reporting.
asp (file-like): optional stream to write a text-based ASP program
for debugging or verification.
"""
global clingo
if not clingo:
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
import clingo
self.out = asp or llnl.util.lang.Devnull()
self.cores = cores
def title(self, name, char):
self.out.write('\n')
self.out.write("%" + (char * 76))
self.out.write('\n')
self.out.write("%% %s\n" % name)
self.out.write("%" + (char * 76))
self.out.write('\n')
def h1(self, name):
self.title(name, "=")
def h2(self, name):
self.title(name, "-")
def newline(self):
self.out.write('\n')
def fact(self, head):
"""ASP fact (a rule without a body)."""
symbol = head.symbol() if hasattr(head, 'symbol') else head
self.out.write("%s.\n" % str(symbol))
atom = self.backend.add_atom(symbol)
self.backend.add_rule([atom], [], choice=self.cores)
if self.cores:
self.assumptions.append(atom)
def solve(
self, solver_setup, specs, dump=None, nmodels=0,
timers=False, stats=False, tests=False
):
timer = spack.util.timer.Timer()
# Initialize the control object for the solver
self.control = clingo.Control()
self.control.configuration.solve.models = nmodels
self.control.configuration.asp.trans_ext = 'all'
self.control.configuration.asp.eq = '5'
self.control.configuration.configuration = 'tweety'
self.control.configuration.solve.parallel_mode = '2'
self.control.configuration.solver.opt_strategy = "usc,one"
# set up the problem -- this generates facts and rules
self.assumptions = []
with self.control.backend() as backend:
self.backend = backend
solver_setup.setup(self, specs, tests=tests)
timer.phase("setup")
# read in the main ASP program and display logic -- these are
# handwritten, not generated, so we load them as resources
parent_dir = os.path.dirname(__file__)
self.control.load(os.path.join(parent_dir, 'concretize.lp'))
self.control.load(os.path.join(parent_dir, "display.lp"))
timer.phase("load")
# Grounding is the first step in the solve -- it turns our facts
# and first-order logic rules into propositional logic.
self.control.ground([("base", [])])
timer.phase("ground")
# With a grounded program, we can run the solve.
result = Result(specs)
models = [] # stable models if things go well
cores = [] # unsatisfiable cores if they do not
def on_model(model):
models.append((model.cost, model.symbols(shown=True, terms=True)))
solve_kwargs = {"assumptions": self.assumptions,
"on_model": on_model,
"on_core": cores.append}
if clingo_cffi:
solve_kwargs["on_unsat"] = cores.append
solve_result = self.control.solve(**solve_kwargs)
timer.phase("solve")
# once done, construct the solve result
result.satisfiable = solve_result.satisfiable
def stringify(x):
if clingo_cffi:
# Clingo w/ CFFI will throw an exception on failure
try:
return x.string
except RuntimeError:
return str(x)
else:
return x.string or str(x)
if result.satisfiable:
# build spec from the best model
builder = SpecBuilder(specs)
min_cost, best_model = min(models)
tuples = [
(sym.name, [stringify(a) for a in sym.arguments])
for sym in best_model
]
answers = builder.build_specs(tuples)
# add best spec to the results
result.answers.append((list(min_cost), 0, answers))
# pull optimization criteria names out of the solution
criteria = [
(int(args[0]), args[1]) for name, args in tuples
if name == "opt_criterion"
]
result.criteria = [t[1] for t in sorted(criteria, reverse=True)]
# record the number of models the solver considered
result.nmodels = len(models)
elif cores:
symbols = dict(
(a.literal, a.symbol)
for a in self.control.symbolic_atoms
)
for core in cores:
core_symbols = []
for atom in core:
sym = symbols[atom]
if sym.name == "rule":
sym = sym.arguments[0].string
core_symbols.append(sym)
result.cores.append(core_symbols)
if timers:
timer.write_tty()
print()
if stats:
print("Statistics:")
pprint.pprint(self.control.statistics)
return result
class SpackSolverSetup(object):
"""Class to set up and run a Spack concretization solve."""
def __init__(self):
self.gen = None # set by setup()
self.declared_versions = {}
self.possible_versions = {}
self.deprecated_versions = {}
self.possible_virtuals = None
self.possible_compilers = []
self.variant_values_from_specs = set()
self.version_constraints = set()
self.target_constraints = set()
self.compiler_version_constraints = set()
self.post_facts = []
# id for dummy variables
self._condition_id_counter = itertools.count()
# Caches to optimize the setup phase of the solver
self.target_specs_cache = None
def pkg_version_rules(self, pkg):
"""Output declared versions of a package.
This uses self.possible_versions so that we include any versions
that arise from a spec.
"""
def key_fn(version):
# Origins are sorted by order of importance:
# 1. Spec from command line
# 2. Externals
# 3. Package preferences
# 4. Directives in package.py
return version.origin, version.idx
pkg = packagize(pkg)
declared_versions = self.declared_versions[pkg.name]
most_to_least_preferred = sorted(declared_versions, key=key_fn)
for weight, declared_version in enumerate(most_to_least_preferred):
self.gen.fact(fn.version_declared(
pkg.name, declared_version.version, weight,
version_origin_str[declared_version.origin]
))
# Declare deprecated versions for this package, if any
deprecated = self.deprecated_versions[pkg.name]
for v in sorted(deprecated):
self.gen.fact(fn.deprecated_version(pkg.name, v))
def spec_versions(self, spec):
"""Return list of clauses expressing spec's version constraints."""
spec = specify(spec)
assert spec.name
if spec.concrete:
return [fn.version(spec.name, spec.version)]
if spec.versions == spack.version.ver(":"):
return []
# record all version constraints for later
self.version_constraints.add((spec.name, spec.versions))
return [fn.version_satisfies(spec.name, spec.versions)]
def target_ranges(self, spec, single_target_fn):
target = spec.architecture.target
# Check if the target is a concrete target
if str(target) in archspec.cpu.TARGETS:
return [single_target_fn(spec.name, target)]
self.target_constraints.add((spec.name, target))
return [fn.node_target_satisfies(spec.name, target)]
def conflict_rules(self, pkg):
for trigger, constraints in pkg.conflicts.items():
trigger_id = self.condition(spack.spec.Spec(trigger), name=pkg.name)
self.gen.fact(fn.conflict_trigger(trigger_id))
for constraint, _ in constraints:
constraint_id = self.condition(constraint, name=pkg.name)
self.gen.fact(fn.conflict(pkg.name, trigger_id, constraint_id))
self.gen.newline()
def available_compilers(self):
"""Facts about available compilers."""
self.gen.h2("Available compilers")
compilers = self.possible_compilers
compiler_versions = collections.defaultdict(lambda: set())
for compiler in compilers:
compiler_versions[compiler.name].add(compiler.version)
for compiler in sorted(compiler_versions):
for v in sorted(compiler_versions[compiler]):
self.gen.fact(fn.compiler_version(compiler, v))
self.gen.newline()
def compiler_defaults(self):
"""Set compiler defaults, given a list of possible compilers."""
self.gen.h2("Default compiler preferences")
compiler_list = self.possible_compilers.copy()
compiler_list = sorted(
compiler_list, key=lambda x: (x.name, x.version), reverse=True)
ppk = spack.package_prefs.PackagePrefs("all", 'compiler', all=False)
matches = sorted(compiler_list, key=ppk)
for i, cspec in enumerate(matches):
f = fn.default_compiler_preference(cspec.name, cspec.version, i)
self.gen.fact(f)
# Enumerate target families. This may be redundant, but compilers with
# custom versions will be able to concretize properly.
for entry in spack.compilers.all_compilers_config():
compiler_entry = entry['compiler']
cspec = spack.spec.CompilerSpec(compiler_entry['spec'])
if not compiler_entry.get('target', None):
continue
self.gen.fact(fn.compiler_supports_target(
cspec.name, cspec.version, compiler_entry['target']
))
def compiler_supports_os(self):
compilers_yaml = spack.compilers.all_compilers_config()
for entry in compilers_yaml:
c = spack.spec.CompilerSpec(entry['compiler']['spec'])
operating_system = entry['compiler']['operating_system']
self.gen.fact(fn.compiler_supports_os(
c.name, c.version, operating_system
))
def package_compiler_defaults(self, pkg):
"""Facts about packages' compiler prefs."""
packages = spack.config.get("packages")
pkg_prefs = packages.get(pkg.name)
if not pkg_prefs or "compiler" not in pkg_prefs:
return
compiler_list = self.possible_compilers.copy()
compiler_list = sorted(
compiler_list, key=lambda x: (x.name, x.version), reverse=True)
ppk = spack.package_prefs.PackagePrefs(pkg.name, 'compiler', all=False)
matches = sorted(compiler_list, key=ppk)
for i, cspec in enumerate(reversed(matches)):
self.gen.fact(fn.node_compiler_preference(
pkg.name, cspec.name, cspec.version, -i * 100
))
def pkg_rules(self, pkg, tests):
pkg = packagize(pkg)
# versions
self.pkg_version_rules(pkg)
self.gen.newline()
# variants
for name, variant in sorted(pkg.variants.items()):
self.gen.fact(fn.variant(pkg.name, name))
single_value = not variant.multi
if single_value:
self.gen.fact(fn.variant_single_value(pkg.name, name))
self.gen.fact(
fn.variant_default_value_from_package_py(
pkg.name, name, variant.default)
)
else:
spec_variant = variant.make_default()
defaults = spec_variant.value
for val in sorted(defaults):
self.gen.fact(
fn.variant_default_value_from_package_py(
pkg.name, name, val)
)
values = variant.values
if values is None:
values = []
elif isinstance(values, spack.variant.DisjointSetsOfValues):
union = set()
# Encode the disjoint sets in the logic program
for sid, s in enumerate(values.sets):
for value in s:
self.gen.fact(fn.variant_value_from_disjoint_sets(
pkg.name, name, value, sid
))
union.update(s)
values = union
# make sure that every variant has at least one possible value
if not values:
values = [variant.default]
for value in sorted(values):
self.gen.fact(fn.variant_possible_value(pkg.name, name, value))
self.gen.newline()
# conflicts
self.conflict_rules(pkg)
# default compilers for this package
self.package_compiler_defaults(pkg)
# virtuals
self.package_provider_rules(pkg)
# dependencies
self.package_dependencies_rules(pkg, tests)
# virtual preferences
self.virtual_preferences(
pkg.name,
lambda v, p, i: self.gen.fact(
fn.pkg_provider_preference(pkg.name, v, p, i)
)
)
def condition(self, required_spec, imposed_spec=None, name=None):
"""Generate facts for a dependency or virtual provider condition.
Arguments:
required_spec (spack.spec.Spec): the spec that triggers this condition
            imposed_spec (spack.spec.Spec or None): the spec with constraints that
are imposed when this condition is triggered
name (str or None): name for `required_spec` (required if
required_spec is anonymous, ignored if not)
Returns:
int: id of the condition created by this function
"""
named_cond = required_spec.copy()
named_cond.name = named_cond.name or name
        assert named_cond.name, "must provide name for anonymous conditions!"
condition_id = next(self._condition_id_counter)
self.gen.fact(fn.condition(condition_id))
# requirements trigger the condition
requirements = self.checked_spec_clauses(
named_cond, body=True, required_from=name)
for pred in requirements:
self.gen.fact(
fn.condition_requirement(condition_id, pred.name, *pred.args)
)
if imposed_spec:
imposed_constraints = self.checked_spec_clauses(
imposed_spec, body=False, required_from=name)
for pred in imposed_constraints:
# imposed "node"-like conditions are no-ops
if pred.name in ("node", "virtual_node"):
continue
self.gen.fact(
fn.imposed_constraint(condition_id, pred.name, *pred.args)
)
return condition_id
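    # Illustrative sketch (assumed, not part of the original module): for a
    # directive like ``depends_on("mpi@3:", when="+shared")`` on package foo,
    # condition() emits roughly
    #   condition(7).
    #   condition_requirement(7, "node", "foo").
    #   condition_requirement(7, "variant_value", "foo", "shared", "True").
    #   imposed_constraint(7, "version_satisfies", "mpi", "3:").
    # where 7 stands for the generated condition id.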
def package_provider_rules(self, pkg):
for provider_name in sorted(set(s.name for s in pkg.provided.keys())):
self.gen.fact(fn.possible_provider(pkg.name, provider_name))
for provided, whens in pkg.provided.items():
for when in whens:
condition_id = self.condition(when, provided, pkg.name)
self.gen.fact(fn.provider_condition(
condition_id, when.name, provided.name
))
self.gen.newline()
def package_dependencies_rules(self, pkg, tests):
"""Translate 'depends_on' directives into ASP logic."""
for _, conditions in sorted(pkg.dependencies.items()):
for cond, dep in sorted(conditions.items()):
deptypes = dep.type.copy()
# Skip test dependencies if they're not requested
if not tests:
deptypes.discard("test")
# ... or if they are requested only for certain packages
if not isinstance(tests, bool) and pkg.name not in tests:
deptypes.discard("test")
# if there are no dependency types to be considered
# anymore, don't generate the dependency
if not deptypes:
continue
condition_id = self.condition(cond, dep.spec, pkg.name)
self.gen.fact(fn.dependency_condition(
condition_id, pkg.name, dep.spec.name
))
for t in sorted(deptypes):
# there is a declared dependency of type t
self.gen.fact(fn.dependency_type(condition_id, t))
self.gen.newline()
def virtual_preferences(self, pkg_name, func):
"""Call func(vspec, provider, i) for each of pkg's provider prefs."""
config = spack.config.get("packages")
pkg_prefs = config.get(pkg_name, {}).get("providers", {})
for vspec, providers in pkg_prefs.items():
if vspec not in self.possible_virtuals:
continue
for i, provider in enumerate(providers):
provider_name = spack.spec.Spec(provider).name
func(vspec, provider_name, i)
def provider_defaults(self):
self.gen.h2("Default virtual providers")
assert self.possible_virtuals is not None
self.virtual_preferences(
"all",
lambda v, p, i: self.gen.fact(
fn.default_provider_preference(v, p, i))
)
def external_packages(self):
"""Facts on external packages, as read from packages.yaml"""
# Read packages.yaml and normalize it, so that it
# will not contain entries referring to virtual
# packages.
packages_yaml = spack.config.get("packages")
packages_yaml = _normalize_packages_yaml(packages_yaml)
self.gen.h1('External packages')
for pkg_name, data in packages_yaml.items():
if pkg_name == 'all':
continue
# This package does not appear in any repository
if pkg_name not in spack.repo.path:
continue
self.gen.h2('External package: {0}'.format(pkg_name))
# Check if the external package is buildable. If it is
# not then "external(<pkg>)" is a fact.
external_buildable = data.get('buildable', True)
if not external_buildable:
self.gen.fact(fn.external_only(pkg_name))
# Read a list of all the specs for this package
externals = data.get('externals', [])
external_specs = [spack.spec.Spec(x['spec']) for x in externals]
# Order the external versions to prefer more recent versions
# even if specs in packages.yaml are not ordered that way
external_versions = [
(x.version, external_id)
for external_id, x in enumerate(external_specs)
]
external_versions = [
(v, idx, external_id)
for idx, (v, external_id) in
enumerate(sorted(external_versions, reverse=True))
]
for version, idx, external_id in external_versions:
self.declared_versions[pkg_name].append(DeclaredVersion(
version=version,
idx=idx,
origin=version_provenance.external
))
# Declare external conditions with a local index into packages.yaml
for local_idx, spec in enumerate(external_specs):
condition_id = self.condition(spec)
self.gen.fact(
fn.possible_external(condition_id, pkg_name, local_idx)
)
self.possible_versions[spec.name].add(spec.version)
self.gen.newline()
def preferred_variants(self, pkg_name):
"""Facts on concretization preferences, as read from packages.yaml"""
preferences = spack.package_prefs.PackagePrefs
preferred_variants = preferences.preferred_variants(pkg_name)
if not preferred_variants:
return
for variant_name in sorted(preferred_variants):
variant = preferred_variants[variant_name]
values = variant.value
if not isinstance(values, tuple):
values = (values,)
# perform validation of the variant and values
spec = spack.spec.Spec(pkg_name)
spec.update_variant_validate(variant_name, values)
for value in values:
self.variant_values_from_specs.add(
(pkg_name, variant.name, value)
)
self.gen.fact(fn.variant_default_value_from_packages_yaml(
pkg_name, variant.name, value
))
def preferred_targets(self, pkg_name):
key_fn = spack.package_prefs.PackagePrefs(pkg_name, 'target')
if not self.target_specs_cache:
self.target_specs_cache = [
spack.spec.Spec('target={0}'.format(target_name))
for target_name in archspec.cpu.TARGETS
]
target_specs = self.target_specs_cache
preferred_targets = [x for x in target_specs if key_fn(x) < 0]
if not preferred_targets:
return
preferred = preferred_targets[0]
self.gen.fact(fn.package_target_weight(
str(preferred.architecture.target), pkg_name, -30
))
def flag_defaults(self):
self.gen.h2("Compiler flag defaults")
# types of flags that can be on specs
for flag in spack.spec.FlagMap.valid_compiler_flags():
self.gen.fact(fn.flag_type(flag))
self.gen.newline()
# flags from compilers.yaml
compilers = all_compilers_in_config()
for compiler in compilers:
for name, flags in compiler.flags.items():
for flag in flags:
self.gen.fact(fn.compiler_version_flag(
compiler.name, compiler.version, name, flag))
def checked_spec_clauses(self, *args, **kwargs):
"""Wrap a call to spec clauses into a try/except block that raise
a comprehensible error message in case of failure.
"""
requestor = kwargs.pop('required_from', None)
try:
clauses = self.spec_clauses(*args, **kwargs)
except RuntimeError as exc:
msg = str(exc)
if requestor:
msg += ' [required from package "{0}"]'.format(requestor)
raise RuntimeError(msg)
return clauses
def spec_clauses(self, spec, body=False, transitive=True):
"""Return a list of clauses for a spec mandates are true.
Arguments:
spec (spack.spec.Spec): the spec to analyze
body (bool): if True, generate clauses to be used in rule bodies
(final values) instead of rule heads (setters).
transitive (bool): if False, don't generate clauses from
dependencies (default True)
"""
clauses = []
# TODO: do this with consistent suffixes.
class Head(object):
node = fn.node
virtual_node = fn.virtual_node
node_platform = fn.node_platform_set
node_os = fn.node_os_set
node_target = fn.node_target_set
variant_value = fn.variant_set
node_compiler = fn.node_compiler_set
node_compiler_version = fn.node_compiler_version_set
node_flag = fn.node_flag_set
class Body(object):
node = fn.node
virtual_node = fn.virtual_node
node_platform = fn.node_platform
node_os = fn.node_os
node_target = fn.node_target
variant_value = fn.variant_value
node_compiler = fn.node_compiler
node_compiler_version = fn.node_compiler_version
node_flag = fn.node_flag
f = Body if body else Head
if spec.name:
clauses.append(
f.node(spec.name) if not spec.virtual
else f.virtual_node(spec.name))
clauses.extend(self.spec_versions(spec))
# seed architecture at the root (we'll propagate later)
# TODO: use better semantics.
arch = spec.architecture
if arch:
if arch.platform:
clauses.append(f.node_platform(spec.name, arch.platform))
if arch.os:
clauses.append(f.node_os(spec.name, arch.os))
if arch.target:
clauses.extend(self.target_ranges(spec, f.node_target))
# variants
for vname, variant in sorted(spec.variants.items()):
values = variant.value
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
# * is meaningless for concretization -- just for matching
if value == '*':
continue
# validate variant value only if spec not concrete
if not spec.concrete:
reserved_names = spack.directives.reserved_names
if not spec.virtual and vname not in reserved_names:
try:
variant_def = spec.package.variants[vname]
except KeyError:
msg = 'variant "{0}" not found in package "{1}"'
raise RuntimeError(msg.format(vname, spec.name))
else:
variant_def.validate_or_raise(variant, spec.package)
clauses.append(f.variant_value(spec.name, vname, value))
# Tell the concretizer that this is a possible value for the
# variant, to account for things like int/str values where we
# can't enumerate the valid values
self.variant_values_from_specs.add((spec.name, vname, value))
# compiler and compiler version
if spec.compiler:
clauses.append(f.node_compiler(spec.name, spec.compiler.name))
if spec.compiler.concrete:
clauses.append(f.node_compiler_version(
spec.name, spec.compiler.name, spec.compiler.version))
elif spec.compiler.versions:
clauses.append(
fn.node_compiler_version_satisfies(
spec.name, spec.compiler.name, spec.compiler.versions))
self.compiler_version_constraints.add(
(spec.name, spec.compiler))
# compiler flags
for flag_type, flags in spec.compiler_flags.items():
for flag in flags:
clauses.append(f.node_flag(spec.name, flag_type, flag))
# dependencies
if spec.concrete:
clauses.append(fn.concrete(spec.name))
# TODO: add concrete depends_on() facts for concrete dependencies
# add all clauses from dependencies
if transitive:
for dep in spec.traverse(root=False):
clauses.extend(self.spec_clauses(dep, body, transitive=False))
return clauses
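    # Illustrative sketch (assumed, not part of the original module): with
    # body=True, an abstract input spec such as zlib@1.2.11%gcc@9.3.0 produces
    # clauses roughly like
    #   node("zlib")
    #   version_satisfies("zlib", "1.2.11")
    #   node_compiler("zlib", "gcc")
    #   node_compiler_version("zlib", "gcc", "9.3.0")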
def build_version_dict(self, possible_pkgs, specs):
"""Declare any versions in specs not declared in packages."""
self.declared_versions = collections.defaultdict(list)
self.possible_versions = collections.defaultdict(set)
self.deprecated_versions = collections.defaultdict(set)
packages_yaml = spack.config.get("packages")
packages_yaml = _normalize_packages_yaml(packages_yaml)
for pkg_name in possible_pkgs:
pkg = spack.repo.get(pkg_name)
# All the versions from the corresponding package.py file. Since concepts
# like being a "develop" version or being preferred exist only at a
# package.py level, sort them in this partial list here
def key_fn(item):
version, info = item
# When COMPARING VERSIONS, the '@develop' version is always
# larger than other versions. BUT when CONCRETIZING, the largest
# NON-develop version is selected by default.
return info.get('preferred', False), not version.isdevelop(), version
for idx, item in enumerate(sorted(
pkg.versions.items(), key=key_fn, reverse=True
)):
v, version_info = item
self.possible_versions[pkg_name].add(v)
self.declared_versions[pkg_name].append(DeclaredVersion(
version=v, idx=idx, origin=version_provenance.package_py
))
deprecated = version_info.get('deprecated', False)
if deprecated:
self.deprecated_versions[pkg_name].add(v)
# All the preferred version from packages.yaml, versions in external
# specs will be computed later
version_preferences = packages_yaml.get(pkg_name, {}).get("version", [])
for idx, v in enumerate(version_preferences):
self.declared_versions[pkg_name].append(DeclaredVersion(
version=v, idx=idx, origin=version_provenance.packages_yaml
))
for spec in specs:
for dep in spec.traverse():
if dep.versions.concrete:
# Concrete versions used in abstract specs from cli. They
# all have idx equal to 0, which is the best possible. In
# any case they will be used due to being set from the cli.
self.declared_versions[dep.name].append(DeclaredVersion(
version=dep.version,
idx=0,
origin=version_provenance.spec
))
self.possible_versions[dep.name].add(dep.version)
def _supported_targets(self, compiler_name, compiler_version, targets):
"""Get a list of which targets are supported by the compiler.
Results are ordered most to least recent.
"""
supported = []
for target in targets:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
target.optimization_flags(compiler_name, compiler_version)
supported.append(target)
except archspec.cpu.UnsupportedMicroarchitecture:
continue
except ValueError:
continue
return sorted(supported, reverse=True)
def platform_defaults(self):
self.gen.h2('Default platform')
platform = spack.architecture.platform()
self.gen.fact(fn.node_platform_default(platform))
def os_defaults(self, specs):
self.gen.h2('Possible operating systems')
platform = spack.architecture.platform()
# create set of OS's to consider
possible = set([
platform.front_os, platform.back_os, platform.default_os])
for spec in specs:
if spec.architecture and spec.architecture.os:
possible.add(spec.architecture.os)
# make directives for possible OS's
for possible_os in sorted(possible):
self.gen.fact(fn.os(possible_os))
# mark this one as default
self.gen.fact(fn.node_os_default(platform.default_os))
def target_defaults(self, specs):
"""Add facts about targets and target compatibility."""
self.gen.h2('Default target')
platform = spack.architecture.platform()
uarch = archspec.cpu.TARGETS.get(platform.default)
self.gen.h2('Target compatibility')
compatible_targets = [uarch] + uarch.ancestors
additional_targets_in_family = sorted([
t for t in archspec.cpu.TARGETS.values()
if (t.family.name == uarch.family.name and
t not in compatible_targets)
], key=lambda x: len(x.ancestors), reverse=True)
compatible_targets += additional_targets_in_family
compilers = self.possible_compilers
# this loop can be used to limit the number of targets
# considered. Right now we consider them all, but it seems that
# many targets can make things slow.
# TODO: investigate this.
best_targets = set([uarch.family.name])
for compiler in sorted(compilers):
supported = self._supported_targets(
compiler.name, compiler.version, compatible_targets
)
# If we can't find supported targets it may be due to custom
# versions in the spec, e.g. gcc@foo. Try to match the
# real_version from the compiler object to get more accurate
# results.
if not supported:
compiler_obj = spack.compilers.compilers_for_spec(compiler)
compiler_obj = compiler_obj[0]
supported = self._supported_targets(
compiler.name,
compiler_obj.real_version,
compatible_targets
)
if not supported:
continue
for target in supported:
best_targets.add(target.name)
self.gen.fact(fn.compiler_supports_target(
compiler.name, compiler.version, target.name))
self.gen.fact(fn.compiler_supports_target(
compiler.name, compiler.version, uarch.family.name))
# add any targets explicitly mentioned in specs
for spec in specs:
if not spec.architecture or not spec.architecture.target:
continue
target = archspec.cpu.TARGETS.get(spec.target.name)
if not target:
self.target_ranges(spec, None)
continue
if target not in compatible_targets:
compatible_targets.append(target)
i = 0
for target in compatible_targets:
self.gen.fact(fn.target(target.name))
self.gen.fact(fn.target_family(target.name, target.family.name))
for parent in sorted(target.parents):
self.gen.fact(fn.target_parent(target.name, parent.name))
# prefer best possible targets; weight others poorly so
# they're not used unless set explicitly
if target.name in best_targets:
self.gen.fact(fn.default_target_weight(target.name, i))
i += 1
else:
self.gen.fact(fn.default_target_weight(target.name, 100))
self.gen.newline()
def virtual_providers(self):
self.gen.h2("Virtual providers")
assert self.possible_virtuals is not None
# what provides what
for vspec in sorted(self.possible_virtuals):
self.gen.fact(fn.virtual(vspec))
self.gen.newline()
def generate_possible_compilers(self, specs):
compilers = all_compilers_in_config()
cspecs = set([c.spec for c in compilers])
# add compiler specs from the input line to possibilities if we
# don't require compilers to exist.
strict = spack.concretize.Concretizer().check_for_compiler_existence
for spec in specs:
for s in spec.traverse():
if not s.compiler or not s.compiler.concrete:
continue
if strict and s.compiler not in cspecs:
raise spack.concretize.UnavailableCompilerVersionError(
s.compiler
)
else:
cspecs.add(s.compiler)
self.gen.fact(fn.allow_compiler(
s.compiler.name, s.compiler.version
))
return cspecs
def define_version_constraints(self):
"""Define what version_satisfies(...) means in ASP logic."""
for pkg_name, versions in sorted(self.version_constraints):
# version must be *one* of the ones the spec allows.
allowed_versions = [
v for v in sorted(self.possible_versions[pkg_name])
if v.satisfies(versions)
]
# This is needed to account for a variable number of
# numbers e.g. if both 1.0 and 1.0.2 are possible versions
exact_match = [v for v in allowed_versions if v == versions]
if exact_match:
allowed_versions = exact_match
# generate facts for each package constraint and the version
# that satisfies it
for v in allowed_versions:
self.gen.fact(fn.version_satisfies(pkg_name, versions, v))
self.gen.newline()
def define_virtual_constraints(self):
"""Define versions for constraints on virtuals.
Must be called before define_version_constraints().
"""
# aggregate constraints into per-virtual sets
constraint_map = collections.defaultdict(lambda: set())
for pkg_name, versions in self.version_constraints:
if not spack.repo.path.is_virtual(pkg_name):
continue
constraint_map[pkg_name].add(versions)
# extract all the real versions mentioned in version ranges
def versions_for(v):
if isinstance(v, spack.version.Version):
return [v]
elif isinstance(v, spack.version.VersionRange):
result = [v.start] if v.start else []
result += [v.end] if v.end else []
return result
elif isinstance(v, spack.version.VersionList):
return sum((versions_for(e) for e in v), [])
else:
raise TypeError("expected version type, found: %s" % type(v))
# define a set of synthetic possible versions for virtuals, so
# that `version_satisfies(Package, Constraint, Version)` has the
# same semantics for virtuals as for regular packages.
for pkg_name, versions in sorted(constraint_map.items()):
possible_versions = set(
sum([versions_for(v) for v in versions], [])
)
for version in sorted(possible_versions):
self.possible_versions[pkg_name].add(version)
def define_compiler_version_constraints(self):
compiler_list = spack.compilers.all_compiler_specs()
compiler_list = list(sorted(set(compiler_list)))
for pkg_name, cspec in self.compiler_version_constraints:
for compiler in compiler_list:
if compiler.satisfies(cspec):
self.gen.fact(
fn.node_compiler_version_satisfies(
pkg_name,
cspec.name,
cspec.versions,
compiler.version
)
)
self.gen.newline()
def define_target_constraints(self):
        def _all_targets_satisfying(single_constraint):
allowed_targets = []
if ':' not in single_constraint:
return [single_constraint]
t_min, _, t_max = single_constraint.partition(':')
for test_target in archspec.cpu.TARGETS.values():
# Check lower bound
if t_min and not t_min <= test_target:
continue
# Check upper bound
if t_max and not t_max >= test_target:
continue
allowed_targets.append(test_target)
return allowed_targets
cache = {}
for spec_name, target_constraint in sorted(self.target_constraints):
# Construct the list of allowed targets for this constraint
allowed_targets = []
for single_constraint in str(target_constraint).split(','):
if single_constraint not in cache:
cache[single_constraint] = _all_targets_satisfiying(
single_constraint
)
allowed_targets.extend(cache[single_constraint])
for target in allowed_targets:
self.gen.fact(
fn.node_target_satisfies(
spec_name, target_constraint, target
)
)
self.gen.newline()
def define_variant_values(self):
"""Validate variant values from the command line.
Also add valid variant values from the command line to the
possible values for a variant.
"""
# Tell the concretizer about possible values from specs we saw in
# spec_clauses()
for pkg, variant, value in sorted(self.variant_values_from_specs):
self.gen.fact(fn.variant_possible_value(pkg, variant, value))
def setup(self, driver, specs, tests=False):
"""Generate an ASP program with relevant constraints for specs.
This calls methods on the solve driver to set up the problem with
facts and rules from all possible dependencies of the input
specs, as well as constraints from the specs themselves.
Arguments:
specs (list): list of Specs to solve
"""
self._condition_id_counter = itertools.count()
# preliminary checks
check_packages_exist(specs)
# get list of all possible dependencies
self.possible_virtuals = set(
x.name for x in specs if x.virtual
)
possible = spack.package.possible_dependencies(
*specs,
virtuals=self.possible_virtuals,
deptype=spack.dependency.all_deptypes
)
pkgs = set(possible)
# driver is used by all the functions below to add facts and
# rules to generate an ASP program.
self.gen = driver
# get possible compilers
self.possible_compilers = self.generate_possible_compilers(specs)
# traverse all specs and packages to build dict of possible versions
self.build_version_dict(possible, specs)
self.gen.h1('General Constraints')
self.available_compilers()
self.compiler_defaults()
self.compiler_supports_os()
# architecture defaults
self.platform_defaults()
self.os_defaults(specs)
self.target_defaults(specs)
self.virtual_providers()
self.provider_defaults()
self.external_packages()
self.flag_defaults()
self.gen.h1('Package Constraints')
for pkg in sorted(pkgs):
self.gen.h2('Package rules: %s' % pkg)
self.pkg_rules(pkg, tests=tests)
self.gen.h2('Package preferences: %s' % pkg)
self.preferred_variants(pkg)
self.preferred_targets(pkg)
# Inject dev_path from environment
env = ev.active_environment()
if env:
for spec in sorted(specs):
for dep in spec.traverse():
_develop_specs_from_env(dep, env)
self.gen.h1('Spec Constraints')
for spec in sorted(specs):
self.gen.h2('Spec: %s' % str(spec))
self.gen.fact(
fn.virtual_root(spec.name) if spec.virtual
else fn.root(spec.name)
)
for clause in self.spec_clauses(spec):
self.gen.fact(clause)
if clause.name == 'variant_set':
self.gen.fact(fn.variant_default_value_from_cli(
*clause.args
))
self.gen.h1("Variant Values defined in specs")
self.define_variant_values()
self.gen.h1("Virtual Constraints")
self.define_virtual_constraints()
self.gen.h1("Version Constraints")
self.define_version_constraints()
self.gen.h1("Compiler Version Constraints")
self.define_compiler_version_constraints()
self.gen.h1("Target Constraints")
self.define_target_constraints()
class SpecBuilder(object):
"""Class with actions to rebuild a spec from ASP results."""
def __init__(self, specs):
self._result = None
self._command_line_specs = specs
self._flag_sources = collections.defaultdict(lambda: set())
self._flag_compiler_defaults = set()
def node(self, pkg):
if pkg not in self._specs:
self._specs[pkg] = spack.spec.Spec(pkg)
def _arch(self, pkg):
arch = self._specs[pkg].architecture
if not arch:
arch = spack.spec.ArchSpec()
self._specs[pkg].architecture = arch
return arch
def node_platform(self, pkg, platform):
self._arch(pkg).platform = platform
def node_os(self, pkg, os):
self._arch(pkg).os = os
def node_target(self, pkg, target):
self._arch(pkg).target = target
def variant_value(self, pkg, name, value):
# FIXME: is there a way not to special case 'dev_path' everywhere?
if name == 'dev_path':
self._specs[pkg].variants.setdefault(
name,
spack.variant.SingleValuedVariant(name, value)
)
return
if name == 'patches':
self._specs[pkg].variants.setdefault(
name,
spack.variant.MultiValuedVariant(name, value)
)
return
self._specs[pkg].update_variant_validate(name, value)
def version(self, pkg, version):
self._specs[pkg].versions = spack.version.ver([version])
def node_compiler(self, pkg, compiler):
self._specs[pkg].compiler = spack.spec.CompilerSpec(compiler)
def node_compiler_version(self, pkg, compiler, version):
self._specs[pkg].compiler.versions = spack.version.VersionList(
[version])
def node_flag_compiler_default(self, pkg):
self._flag_compiler_defaults.add(pkg)
def node_flag(self, pkg, flag_type, flag):
self._specs[pkg].compiler_flags.setdefault(flag_type, []).append(flag)
def node_flag_source(self, pkg, source):
self._flag_sources[pkg].add(source)
def no_flags(self, pkg, flag_type):
self._specs[pkg].compiler_flags[flag_type] = []
def external_spec_selected(self, pkg, idx):
"""This means that the external spec and index idx
has been selected for this package.
"""
packages_yaml = spack.config.get('packages')
packages_yaml = _normalize_packages_yaml(packages_yaml)
spec_info = packages_yaml[pkg]['externals'][int(idx)]
self._specs[pkg].external_path = spec_info.get('prefix', None)
self._specs[pkg].external_modules = (
spack.spec.Spec._format_module_list(spec_info.get('modules', None))
)
self._specs[pkg].extra_attributes = spec_info.get(
'extra_attributes', {}
)
def depends_on(self, pkg, dep, type):
dependency = self._specs[pkg]._dependencies.get(dep)
if not dependency:
self._specs[pkg]._add_dependency(
self._specs[dep], (type,))
else:
dependency.add_type(type)
def reorder_flags(self):
"""Order compiler flags on specs in predefined order.
We order flags so that any node's flags will take priority over
those of its dependents. That is, the deepest node in the DAG's
flags will appear last on the compile line, in the order they
were specified.
        The solver determines which flags are on nodes; this routine
imposes order afterwards.
"""
# nodes with no flags get flag order from compiler
compilers = dict((c.spec, c) for c in all_compilers_in_config())
for pkg in self._flag_compiler_defaults:
spec = self._specs[pkg]
compiler_flags = compilers[spec.compiler].flags
check_same_flags(spec.compiler_flags, compiler_flags)
spec.compiler_flags.update(compiler_flags)
# index of all specs (and deps) from the command line by name
cmd_specs = dict(
(s.name, s)
for spec in self._command_line_specs
for s in spec.traverse())
# iterate through specs with specified flags
for pkg, sources in self._flag_sources.items():
spec = self._specs[pkg]
# order is determined by the DAG. A spec's flags come after
# any from its ancestors on the compile line.
order = [
s.name
for s in spec.traverse(order='post', direction='parents')]
# sort the sources in our DAG order
sorted_sources = sorted(
sources, key=lambda s: order.index(s))
# add flags from each source, lowest to highest precedence
flags = collections.defaultdict(lambda: [])
for source_name in sorted_sources:
source = cmd_specs[source_name]
for name, flag_list in source.compiler_flags.items():
extend_flag_list(flags[name], flag_list)
check_same_flags(spec.compiler_flags, flags)
spec.compiler_flags.update(flags)
def deprecated(self, pkg, version):
msg = 'using "{0}@{1}" which is a deprecated version'
tty.warn(msg.format(pkg, version))
def build_specs(self, function_tuples):
# Functions don't seem to be in particular order in output. Sort
# them here so that directives that build objects (like node and
# node_compiler) are called in the right order.
function_tuples.sort(key=lambda f: {
"node": -2,
"node_compiler": -1,
}.get(f[0], 0))
self._specs = {}
for name, args in function_tuples:
action = getattr(self, name, None)
# print out unknown actions so we can display them for debugging
if not action:
msg = "%s(%s)" % (name, ", ".join(str(a) for a in args))
tty.debug(msg)
continue
assert action and callable(action)
# ignore predicates on virtual packages, as they're used for
# solving but don't construct anything
pkg = args[0]
if spack.repo.path.is_virtual(pkg):
continue
action(*args)
# namespace assignment is done after the fact, as it is not
# currently part of the solve
for spec in self._specs.values():
repo = spack.repo.path.repo_for_pkg(spec)
spec.namespace = repo.namespace
# fix flags after all specs are constructed
self.reorder_flags()
        # inject patches -- note that we can't use set() to unique the
# roots here, because the specs aren't complete, and the hash
# function will loop forever.
roots = [spec.root for spec in self._specs.values()]
roots = dict((id(r), r) for r in roots)
for root in roots.values():
spack.spec.Spec.inject_patches_variant(root)
# Add external paths to specs with just external modules
for s in self._specs.values():
spack.spec.Spec.ensure_external_path_if_external(s)
for s in self._specs.values():
_develop_specs_from_env(s, ev.active_environment())
for s in self._specs.values():
s._mark_concrete()
for s in self._specs.values():
spack.spec.Spec.ensure_no_deprecated(s)
return self._specs
def _develop_specs_from_env(spec, env):
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
if not dev_info:
return
path = os.path.normpath(os.path.join(env.path, dev_info['path']))
if 'dev_path' in spec.variants:
assert spec.variants['dev_path'].value == path
else:
spec.variants.setdefault(
'dev_path', spack.variant.SingleValuedVariant('dev_path', path)
)
spec.constrain(dev_info['spec'])
#
# These are handwritten parts for the Spack ASP model.
#
def solve(specs, dump=(), models=0, timers=False, stats=False, tests=False):
"""Solve for a stable model of specs.
Arguments:
specs (list): list of Specs to solve.
dump (tuple): what to dump
models (int): number of models to search (default: 0)
"""
driver = PyclingoDriver()
if "asp" in dump:
driver.out = sys.stdout
# Check upfront that the variants are admissible
for root in specs:
for s in root.traverse():
if s.virtual:
continue
spack.spec.Spec.ensure_valid_variants(s)
setup = SpackSolverSetup()
return driver.solve(setup, specs, dump, models, timers, stats, tests)
| 36.265366 | 85 | 0.595264 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | AaltoSciComp/spack | lib/spack/spack/solver/asp.py | 61,361 | Python |
import logging
import sys
from notion.block import PageBlock
from notion.client import NotionClient
from requests import HTTPError, codes
from enex2notion.utils_exceptions import BadTokenException
logger = logging.getLogger(__name__)
def get_root(token, name):
if not token:
logger.warning(
"No token provided, dry run mode. Nothing will be uploaded to Notion!"
)
return None
try:
client = get_notion_client(token)
except BadTokenException:
logger.error("Invalid token provided!")
sys.exit(1)
return get_import_root(client, name)
def get_notion_client(token):
try:
return NotionClient(token_v2=token)
except HTTPError as e: # pragma: no cover
if e.response.status_code == codes["unauthorized"]:
raise BadTokenException
raise
def get_import_root(client, title):
try:
top_pages = client.get_top_level_pages()
except KeyError: # pragma: no cover
# Need empty account to test
top_pages = []
for page in top_pages:
if isinstance(page, PageBlock) and page.title == title:
logger.info(f"'{title}' page found")
return page
logger.info(f"Creating '{title}' page...")
return client.current_space.add_page(title)
| 25.25 | 82 | 0.665651 | ["MIT"] | vzhd1701/enex2notion | enex2notion/cli_notion.py | 1,313 | Python |
from ds2.orderedmapping.bstmapping import BSTMapping, BSTNode
from ds2.orderedmapping.balancedbst import BalancedBST, BalancedBSTNode
from ds2.orderedmapping.wbtree import WBTree, WBTreeNode
from ds2.orderedmapping.avltree import AVLTree, AVLTreeNode
from ds2.orderedmapping.splaytree import SplayTree, SplayTreeNode
| 52.833333 | 71 | 0.873817 | ["MIT"] | aslisabanci/datastructures | ds2/orderedmapping/__init__.py | 317 | Python |
from flask_testing import TestCase
from flask import url_for
from core import app, db
import unittest
from core.models import FeatureRequest, Client, ProductArea
import datetime
class BaseTest(TestCase):
SQLALCHEMY_DATABASE_URI = "sqlite://"
TESTING = True
def create_app(self):
app.config["TESTING"] = True
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
return app
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
class HomepageTest(BaseTest):
def test_homepage(self):
"Make sure that homepage works fine"
response = self.client.get(url_for("home_view"))
assert b"Add a feature request:" in response.data
assert b"List feature requests:" in response.data
class ListpageTest(BaseTest):
def test_empty_listpage(self):
"Make sure that empty list page works fine"
response = self.client.get(url_for("home_view"))
response = self.client.get(url_for("feature_requests_view"))
assert b"No feature requests found." in response.data
def test_non_empty_listpage(self):
"Also that it can display multiple entries"
fr = FeatureRequest(
title="Title",
description="Desc",
client=None,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=None,
)
db.session.add(fr)
fr2 = FeatureRequest(
title="Title",
description="Desc",
client=None,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=None,
)
db.session.add(fr2)
db.session.commit()
response = self.client.get(url_for("feature_requests_view"))
assert response.data.count(b"Update") == 2
assert response.data.count(b"Delete") == 2
assert (
url_for("feature_requests_update", feature_request_id=1).encode()
in response.data
)
assert (
url_for("feature_requests_delete", feature_request_id=1).encode()
in response.data
)
class AddOtherObjectsMixin:
"A reusable mixin that adds a client and a product area to the db"
def add_other_objects(self):
self.cl = Client("C1")
db.session.add(self.cl)
self.pa = ProductArea("PA1")
db.session.add(self.pa)
db.session.commit()
class CreatepageTest(AddOtherObjectsMixin, BaseTest):
def test_createpage(self):
"Make sure that the create page works"
response = self.client.get(url_for("feature_requests_create"))
assert b"Add Feature Request" in response.data
assert b"<form method='POST'>" in response.data
assert b"form-group has-error" not in response.data
def test_createpage_error(self):
"The create page should return with error when post data is missing"
response = self.client.post(
url_for("feature_requests_create"),
data=dict(
title="Title",
description="Desc",
client=None,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=None,
),
)
assert b"form-group has-error" in response.data
assert b"<form method='POST'>" in response.data
assert response.status == "200 OK"
def test_createpage_success(self):
"The create page should return a 302 FOUND redirect when an entry is submitted"
client = Client("C1")
db.session.add(client)
product_area = ProductArea("PA1")
db.session.add(product_area)
db.session.commit()
response = self.client.post(
url_for("feature_requests_create"),
data=dict(
title="Title",
description="Desc",
client=client.id,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=product_area.id,
),
)
assert response.status == "302 FOUND"
def test_createpage_success_flash(self):
"""The create page should display the proper flash message when an object is
created"""
self.add_other_objects()
response = self.client.post(
url_for("feature_requests_create"),
data=dict(
title="Title",
description="Desc",
client=self.cl.id,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa.id,
),
follow_redirects=True,
)
assert response.status == "200 OK"
assert b"Feature request created!" in response.data
assert response.data.count(b"Update") == 1
assert response.data.count(b"Delete") == 1
assert self.cl.name.encode() in response.data
assert self.pa.name.encode() in response.data
def test_createpage_change_priorities(self):
"""The create page should change the priorities of the other objects when a
new one has the same priority and client"""
self.add_other_objects()
fr = FeatureRequest(
title="Title",
description="Desc",
client=self.cl,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa,
)
db.session.add(fr)
db.session.commit()
assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 1
response = self.client.post(
url_for("feature_requests_create"),
data=dict(
title="Title",
description="Desc",
client=self.cl.id,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa.id,
),
follow_redirects=True,
)
assert response.status == "200 OK"
assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 2
class UpdatepageTest(AddOtherObjectsMixin, BaseTest):
def add_feature_request(self):
"A reusable method for this class"
self.fr = FeatureRequest(
title="Title",
description="Desc",
client=None,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=None,
)
db.session.add(self.fr)
db.session.commit()
def test_updatepage_not_found(self):
"Make sure that the update page returs 404 when the obj is not found"
response = self.client.get(
url_for("feature_requests_update", feature_request_id=1232)
)
assert response.status == "404 NOT FOUND"
def test_updatepage_ok(self):
"Make sure that the update page is displayed properly along with the object"
self.add_feature_request()
response = self.client.get(
url_for("feature_requests_update", feature_request_id=self.fr.id)
)
assert "Edit Feature Request: {0}".format(self.fr.id).encode() in response.data
assert b"<form method='POST'>" in response.data
assert b"form-group has-error" not in response.data
assert self.fr.title.encode() in response.data
assert self.fr.description.encode() in response.data
def test_updatepage_error(self):
"The createpage should return an error when data is missing"
self.add_feature_request()
response = self.client.post(
url_for("feature_requests_update", feature_request_id=self.fr.id),
data=dict(
title="Title",
description="Desc",
client=None,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=None,
),
)
assert b"form-group has-error" in response.data
assert b"<form method='POST'>" in response.data
assert response.status == "200 OK"
    def test_updatepage_success(self):
        "The update page should properly update the object"
self.add_feature_request()
self.add_other_objects()
newtitle = "The new title"
response = self.client.post(
url_for("feature_requests_update", feature_request_id=self.fr.id),
data=dict(
title=newtitle,
description="Desc",
client=self.cl.id,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa.id,
),
)
assert response.status == "302 FOUND"
assert FeatureRequest.query.filter_by(id=self.fr.id).first().title == newtitle
def test_updatepage_success_flash(self):
"""Make sure that the flash message is displayed correctly and we are
redirected to the list view"""
self.add_feature_request()
self.add_other_objects()
response = self.client.post(
url_for("feature_requests_update", feature_request_id=self.fr.id),
data=dict(
title="Title",
description="Desc",
client=self.cl.id,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa.id,
),
follow_redirects=True,
)
assert response.status == "200 OK"
assert b"Feature request updated!" in response.data
assert response.data.count(b"Update") == 1
assert response.data.count(b"Delete") == 1
assert self.cl.name.encode() in response.data
assert self.pa.name.encode() in response.data
def test_updatepage_change_priorities(self):
"The updatepage should also update the client priorities"
self.add_other_objects()
fr = FeatureRequest(
title="Title",
description="Desc",
client=self.cl,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa,
)
db.session.add(fr)
fr2 = FeatureRequest(
title="Title",
description="Desc",
client=self.cl,
client_priority=2,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa,
)
db.session.add(fr2)
db.session.commit()
assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 1
assert FeatureRequest.query.filter_by(id=fr2.id).first().client_priority == 2
response = self.client.post(
url_for("feature_requests_update", feature_request_id=2),
data=dict(
title="Title",
description="Desc",
client=self.cl.id,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=self.pa.id,
),
follow_redirects=True,
)
assert response.status == "200 OK"
assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 2
assert FeatureRequest.query.filter_by(id=fr2.id).first().client_priority == 1
class DeletepageTest(BaseTest):
def add_feature_request(self):
"A reusable method for this class"
self.fr = FeatureRequest(
title="Title",
description="Desc",
client=None,
client_priority=1,
target_date=datetime.date(2018, 1, 1),
product_area=None,
)
db.session.add(self.fr)
db.session.commit()
    def test_deletepage_only_post(self):
        "Make sure that the delete page returns 405 when requested with GET"
response = self.client.get(
url_for("feature_requests_delete", feature_request_id=1232)
)
assert response.status == "405 METHOD NOT ALLOWED"
    def test_deletepage_not_found(self):
        "Make sure that the delete page returns 404 when the obj is not found"
response = self.client.post(
url_for("feature_requests_delete", feature_request_id=1232)
)
assert response.status == "404 NOT FOUND"
def test_deletepage_ok(self):
"Make sure that the delete page deletes the obj"
self.add_feature_request()
assert db.session.query(FeatureRequest.query.filter().exists()).scalar() is True
response = self.client.post(
url_for("feature_requests_delete", feature_request_id=self.fr.id)
)
assert (
db.session.query(FeatureRequest.query.filter().exists()).scalar() is False
)
assert response.status == "302 FOUND"
def test_deletepage_flash_message(self):
"Make sure that the delete page shows the proper flash message"
self.add_feature_request()
response = self.client.post(
url_for("feature_requests_delete", feature_request_id=self.fr.id),
follow_redirects=True,
)
assert response.status == "200 OK"
assert b"Feature request deleted!" in response.data
assert response.data.count(b"Update") == 0
assert response.data.count(b"Delete") == 0
if __name__ == "__main__":
unittest.main()
| 35.954907 | 88 | 0.594393 | ["Unlicense"] | spapas/feature-requests | test_core.py | 13,555 | Python |
from .settings import *
RESPA_CATERINGS_ENABLED = True
RESPA_COMMENTS_ENABLED = True
RESPA_PAYMENTS_ENABLED = True
# Bambora Payform provider settings
RESPA_PAYMENTS_PROVIDER_CLASS = 'payments.providers.BamboraPayformProvider'
RESPA_PAYMENTS_BAMBORA_API_URL = 'https://real-bambora-api-url/api'
RESPA_PAYMENTS_BAMBORA_API_KEY = 'dummy-key'
RESPA_PAYMENTS_BAMBORA_API_SECRET = 'dummy-secret'
RESPA_PAYMENTS_BAMBORA_PAYMENT_METHODS = ['dummy-bank']
DJANGO_ADMIN_LOGOUT_REDIRECT_URL='https://hel.fi'
RESPA_ADMIN_LOGOUT_REDIRECT_URL='https://hel.fi'
# API token auth endpoint
MACHINE_TO_MACHINE_AUTH_ENABLED=1
| 35.764706 | 75 | 0.845395 | ["MIT"] | johlindq/respa | respa/test_settings.py | 608 | Python |
# Theory: Indexes
# There are several types of collections to store data in Python.
# Positionally ordered collections of elements are usually called
# sequences, and both lists and strings belong to them. Each
# element in a list, as well as each character in a string, has an
# index that corresponds to its position. Indexes are used to
# access elements within a sequence. Indexing is zero-based, so if
# you see a person who counts from zero, you must have met a
# programmer.
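# A minimal illustration (added example; the list and string below are assumed
# sample values, not part of the original theory text):
colors = ["red", "green", "blue"]
word = "python"
print(colors[0])  # "red" -- index 0 is the first element
print(word[1])    # "y"   -- index 1 is the second character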
| 40.333333 | 66 | 0.770661 | ["MIT"] | chanchanchong/PYTHON-TRACK-IN-HYPERSKILL | Computer science/Programming languages/Python/Working with data/Collections/Lists/Indexes/topic.py | 484 | Python |
from __future__ import absolute_import
import requests
import json
import logging
from .base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
def ProviderParser(subparser):
subparser.description = '''
Zeit Provider requires a token to access its API.
You can generate one for your account on the following URL:
https://zeit.co/account/tokens'''
subparser.add_argument('--auth-token', help='specify your API token')
# Implements the DNS Zeit provider.
# The API is quite simple: you can list all records, add one record or delete one record.
# - list is pretty straightforward: we get all records then filter for given parameters,
# - add uses directly the API to add a new record without any added complexity,
# - delete uses list + delete: we get the list of all records, filter on the given parameters and delete record by id,
# - update uses list + delete + add: we get the list of all records, find record for given identifier, then insert a new record and delete the old record.
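# Hedged usage sketch (added; not part of the original module). The Provider
# class below can be driven directly from Python; the domain and token values
# here are assumptions, and the base class is assumed to store the options dict.
#
#     provider = Provider({'domain': 'example.com', 'auth_token': '<zeit-token>'})
#     provider.authenticate()
#     provider.create_record('TXT', 'test', 'hello world')
#     print(provider.list_records(type='TXT'))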
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.domain_id = None
self.api_endpoint = 'https://api.zeit.co/v2/domains'
def authenticate(self):
result = self._get('/{0}'.format(self.options['domain']))
if not result['uid']:
raise Exception('Error, domain {0} not found'.format(self.options['domain']))
self.domain_id = result['uid']
def list_records(self, type=None, name=None, content=None):
result = self._get('/{0}/records'.format(self.options['domain']))
raw_records = result['records']
if type:
raw_records = [raw_record for raw_record in raw_records if raw_record['type'] == type]
if name:
raw_records = [raw_record for raw_record in raw_records if raw_record['name'] == self._relative_name(name)]
if content:
raw_records = [raw_record for raw_record in raw_records if raw_record['value'] == content]
records = []
for raw_record in raw_records:
records.append({
'id': raw_record['id'],
'type': raw_record['type'],
'name': self._full_name(raw_record['name']),
'content': raw_record['value']
})
LOGGER.debug('list_records: %s', records)
return records
def create_record(self, type, name, content):
# We ignore creation if a record already exists for given type/name/content
records = self.list_records(type, name, content)
if records:
LOGGER.debug('create_record (ignored, duplicate): %s', records[0]['id'])
return True
data = {
'type': type,
'name': self._relative_name(name),
'value': content
}
result = self._post('/{0}/records'.format(self.options['domain']), data)
if not result['uid']:
            raise Exception('Error occurred when inserting the new record.')
LOGGER.debug('create_record: %s', result['uid'])
return True
def update_record(self, identifier, type=None, name=None, content=None):
        # Zeit does not allow updating a record, only adding or removing one.
        # So we fetch the corresponding record, update its content, and insert it as a new record.
        # Then we remove the old record.
records = []
if identifier:
records = self.list_records()
records = [record for record in records if record['id'] == identifier]
else:
records = self.list_records(type, name)
if not records:
            raise Exception('No record found for identifier: {0}'.format(identifier))
if len(records) > 1:
            LOGGER.warn('Multiple records have been found for the given parameters. Only the first one will be updated (id: {0})'.format(records[0]['id']))
data = {
'type': type,
'name': self._relative_name(name),
'value': content
}
if not type:
data['type'] = records[0]['type']
if not name:
data['name'] = self._relative_name(records[0]['name'])
if not content:
data['value'] = records[0]['content']
result = self._post('/{0}/records'.format(self.options['domain']), data)
self._delete('/{0}/records/{1}'.format(self.options['domain'], records[0]['id']))
LOGGER.debug('update_record: %s => %s', records[0]['id'], result['uid'])
return True
def delete_record(self, identifier=None, type=None, name=None, content=None):
delete_record_ids = []
if not identifier:
records = self.list_records(type, name, content)
delete_record_ids = [record['id'] for record in records]
else:
delete_record_ids.append(identifier)
LOGGER.debug('delete_records: %s', delete_record_ids)
for delete_record_id in delete_record_ids:
self._delete('/{0}/records/{1}'.format(self.options['domain'], delete_record_id))
LOGGER.debug('delete_record: %s', True)
return True
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
request = requests.request(action, self.api_endpoint + url,
params=query_params,
data=json.dumps(data),
headers={'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))})
request.raise_for_status()
        return request.json()
| 38.695364 | 156 | 0.60688 | ["MIT"] | alexsilva1983/lexicon | lexicon/providers/zeit.py | 5,843 | Python |
# Generated by Django 3.2.9 on 2022-01-01 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0003_auto_20220101_1040'),
]
operations = [
migrations.RenameField(
model_name='notes',
old_name='category',
new_name='categories',
),
migrations.RemoveField(
model_name='notescategory',
name='count',
),
migrations.AddField(
model_name='notesrating',
name='comment',
field=models.TextField(null=True),
),
]
| 22.714286 | 47 | 0.553459 | ["MIT"] | ArnedyNavi/studymate | notes/migrations/0004_auto_20220101_1047.py | 636 | Python |
import tkinter as tk
from tkinter import ttk
import json
from dashboard.entities.InputField import InputField
from dashboard.entities.StatusField import StatusField
class Devices(ttk.Frame):
"""
Devices Frame for Settings
"""
def __init__(self, parent, settings):
"""
        Constructs the Devices settings frame
:param parent: Parent Frame
:param settings: settings class
"""
self.settings = settings
ttk.Frame.__init__(self, parent, relief="raised", borderwidth=2)
self.content = ttk.Frame(self, borderwidth=2)
self.content.pack(expand=True, fill=tk.X, side='top', anchor='n')
self.devices = []
label1 = tk.Label(self.content, text="Apparaten", font=("Verdana", 14), relief="groove")
label1.pack(expand=True, fill=tk.X, side='top')
self.render_devices()
def render_devices(self):
        # Remove current sidebar buttons
for frame in self.devices:
frame.pack_forget()
# Add sidebar buttons based on json
self.settings.load_devices()
for serial_number, data in self.settings.devices.items():
self.build_device(serial_number, data)
def build_device(self, serial_number, data):
button = ttk.Button(self.content, text=data["Name"], width=15,
command=lambda: self.settings.show_view(serial_number, self))
button.pack(fill=tk.X, pady=2)
        self.devices.append(button)
| 33.111111 | 96 | 0.641611 | ["MIT"] | Hexagoons/GUI-Arduino-Weather-Station | dashboard/entities/Devices.py | 1,490 | Python |