content stringlengths 7-928k | avg_line_length float64 3.5-33.8k | max_line_length int64 6-139k | alphanum_fraction float64 0.08-0.96 | licenses sequence | repository_name stringlengths 7-104 | path stringlengths 4-230 | size int64 7-928k | lang stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
import argparse
import sys
from os import path
import cv2 as cv
from mcrops import veget, utils
def full_imshow(name, image):
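# Display an image in a resizable window scaled to 800x600 pixels.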
cv.namedWindow(name, cv.WINDOW_NORMAL)
cv.resizeWindow(name, 800, 600)
cv.imshow(name, image)
def main(image_path: str, resolution: float, row_sep: float):
print(f'Starting analysis.\n')
print(f'Loading image {image_path}')
# Load a crop field image
image = cv.imread(image_path)
if image is None:
raise ValueError(f'Unable to load image {image_path}')
h, w = image.shape[:2]
image_draw = image.copy()
print(f'Image loaded. Size is {w}x{h} pixels.')
print('Segmenting vegetation')
# Segment vegetation
veg_mask = veget.segment_vegetation(image)
print('Detecting crop area')
# Detect the crop field ROI area
roi_poly = veget.detect_roi(
veg_mask, row_sep=row_sep, resolution=resolution
)
# Draw the contours of the ROI area
cv.drawContours(
image=image_draw,
contours=[roi_poly],
contourIdx=-1,
color=(0, 0, 255),
thickness=8,
lineType=cv.LINE_AA
)
# Build a mask image from the ROI polyline
roi_mask = utils.poly_mask(roi_poly, veg_mask.shape[:2])
veg_mask[roi_mask == 0] = 0
print('Computing vegetation density map')
# Create a vegetation density map from the vegetation mask
density_map = veget.mask_density(
veg_mask,
roi_mask,
resolution=resolution,
cell_size=(8, 8)
)
d_min = density_map.min()
d_max = density_map.max()
print(f'Vegetation density is in range [{d_min:.3f}, {d_max:.3f}]')
# Convert the vegetation density map to a color image
image_map = utils.array_image(density_map, colormap=cv.COLORMAP_JET)
full_imshow('Crop field image', image_draw)
full_imshow('Vegetation mask', veg_mask)
full_imshow('Vegetation density map', image_map)
print(f'Analysis finished. Press any key to quit.\n')
cv.waitKey(0)
cv.destroyAllWindows()
if __name__ == '__main__':
curr_dir = path.dirname(path.abspath(__file__))
parent_dir, _ = path.split(curr_dir)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
type=str,
required=False,
default=path.join(curr_dir, 'data/crop_field_sparse_res40.png'),
help='Path to crop field image.'
)
parser.add_argument(
'--res',
type=float,
required=False,
default=40,
help='Image resolution in pixels/meter.'
)
parser.add_argument(
'--row_sep',
type=float,
required=False,
default=0.7,
help='Approximate mean crop row separation in meters.'
)
args = parser.parse_args(sys.argv[1:])
main(image_path=args.input, resolution=args.res, row_sep=args.row_sep)
| 27.247619 | 74 | 0.650122 | [
"MIT"
] | raikel/mcrops | examples/vegetation.py | 2,861 | Python |
# Copyright (c) 2014, Raphael Kubo da Costa <[email protected]>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import PyKDE4.kdecore
if __name__ == '__main__':
try:
import PyKDE4.pykdeconfig
pykdecfg = PyKDE4.pykdeconfig.Configuration()
sip_dir = pykdecfg.pykde_sip_dir
sip_flags = pykdecfg.pykde_kde_sip_flags
except ImportError:
# PyQt4 >= 4.10.0 was built with configure-ng.py instead of
# configure.py, so pyqtconfig.py and pykdeconfig.py are not installed.
sip_dir = PyKDE4.kdecore.PYKDE_CONFIGURATION['sip_dir']
sip_flags = PyKDE4.kdecore.PYKDE_CONFIGURATION['sip_flags']
print('pykde_version:%06.x' % PyKDE4.kdecore.version())
print('pykde_version_str:%s' % PyKDE4.kdecore.versionString())
print('pykde_sip_dir:%s' % sip_dir)
print('pykde_sip_flags:%s' % sip_flags)
| 41.73913 | 78 | 0.713542 | [
"BSD-3-Clause"
] | KDE-China/extra-cmake-modules | attic/modules/FindPyKDE4.py | 960 | Python |
# -*- coding: utf-8 -*-
from random import randint
import json
from .base import analyse_process_graph, PROCESS_DICT, PROCESS_DESCRIPTION_DICT
from openeo_grass_gis_driver.process_schemas import Parameter, ProcessDescription, ReturnValue
from .actinia_interface import ActiniaInterface
__license__ = "Apache License, Version 2.0"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "[email protected]"
PROCESS_NAME = "get_data"
def create_process_description():
p_imagery = Parameter(description="Any openEO process object that returns raster datasets "
"or space-time raster dataset",
schema={"type": "object", "format": "eodata"},
required=False)
p_data_id = Parameter(description="The identifier of a single raster-, vector- or space-time raster dataset",
schema={"type": "string",
"examples": ["nc_spm_08.landsat.raster.lsat5_1987_10",
"nc_spm_08.PERMANENT.vector.lakes",
"ECAD.PERMANENT.strds.temperature_1950_2017_yearly"]},
required=True)
rv = ReturnValue(description="Processed EO data.",
schema={"type": "object", "format": "eodata"})
simple_example = {
"process_id": PROCESS_NAME,
"data_id": "nc_spm_08.PERMANENT.vector.lakes",
}
raster_vector_example = {
"process_id": PROCESS_NAME,
"data_id": "nc_spm_08.landsat.raster.lsat5_1987_10",
"imagery": {
"process_id": "get_data",
"data_id": "nc_spm_08.PERMANENT.vector.lakes"
}
}
strds_example = {
"process_id": PROCESS_NAME,
"data_id": "ECAD.PERMANENT.strds.temperature_1950_2017_yearly",
"imagery": {
"process_id": "get_data",
"data_id": "ECAD.PERMANENT.strds.precipitation_1950_2017_yearly"
}
}
examples = dict(simple_example=simple_example,
raster_vector_example=raster_vector_example,
strds_example=strds_example)
pd = ProcessDescription(name=PROCESS_NAME,
description="This process returns a raster-, a vector- or a space-time raster "
"datasets that is available in the /collections endpoint.",
summary="Returns a single dataset that is available in "
"the /collections endpoint for processing",
parameters={"imagery": p_imagery, "data_id": p_data_id},
returns=rv,
examples=examples)
return json.loads(pd.to_json())
PROCESS_DESCRIPTION_DICT[PROCESS_NAME] = create_process_description()
def create_process_chain_entry(input_name):
"""Create a Actinia process description that uses t.rast.series to create the minimum
value of the time series.
:param input_time_series: The input time series name
:param output_map: The name of the output map
:return: A Actinia process chain description
"""
location, mapset, datatype, layer_name = ActiniaInterface.layer_def_to_components(input_name)
input_name = layer_name
if mapset is not None:
input_name = layer_name + "@" + mapset
rn = randint(0, 1000000)
pc = {}
if datatype == "raster":
pc = {"id": "r_info_%i" % rn,
"module": "r.info",
"inputs": [{"param": "map", "value": input_name}, ],
"flags": "g"}
elif datatype == "vector":
pc = {"id": "v_info_%i" % rn,
"module": "v.info",
"inputs": [{"param": "map", "value": input_name}, ],
"flags": "g"}
elif datatype == "strds":
pc = {"id": "t_info_%i" % rn,
"module": "t.info",
"inputs": [{"param": "input", "value": input_name}, ],
"flags": "g"}
else:
raise Exception("Unsupported datatype")
return pc
def get_process_list(process):
"""Analyse the process description and return the Actinia process chain and the name of the processing result
:param process: The process description
:return: (output_names, actinia_process_list)
"""
input_names, process_list = analyse_process_graph(process)
output_names = []
# First analyse the data entry
if "data_id" not in process:
raise Exception("Process %s requires parameter <data_id>" % PROCESS_NAME)
output_names.append(process["data_id"])
pc = create_process_chain_entry(input_name=process["data_id"])
process_list.append(pc)
# Then add the input to the output
for input_name in input_names:
# Create the output name based on the input name and method
output_name = input_name
output_names.append(output_name)
return output_names, process_list
PROCESS_DICT[PROCESS_NAME] = get_process_list
| 36.539007 | 113 | 0.607725 | [
"Apache-2.0"
] | AnikaBettge/openeo-grassgis-driver | src/openeo_grass_gis_driver/actinia_processing/get_data_process.py | 5,154 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2010 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import unittest, os, os.path
from odf.opendocument import OpenDocumentSpreadsheet
import odf.table
import odf.office
import odf.form
import odf.draw
class TestForm(unittest.TestCase):
def test_ooo_ns(self):
""" Check that ooo exists in namespace declarations """
calcdoc = OpenDocumentSpreadsheet()
table = odf.table.Table(name="Costs")
forms = odf.office.Forms()
form = odf.form.Form(
controlimplementation="ooo:com.sun.star.form.component.Form")
lb = odf.form.Listbox(
controlimplementation="ooo:com.sun.star.form.component.ListBox", dropdown="true", id="control1")
form.addElement(lb)
forms.addElement(form)
table.addElement(forms)
# One empty line
tr = odf.table.TableRow()
table.addElement(tr)
tr = odf.table.TableRow()
# One empty cell
cell = odf.table.TableCell()
tr.addElement(cell)
cell = odf.table.TableCell()
draw = odf.draw.Control(
control="control1", height="0.1126in", width="0.798in",
x="0.0303in", y="0.0205in", endcelladdress="Costs.B2",
endx="0.8283in", endy="0.1331in")
cell.addElement(draw)
tr.addElement(cell)
table.addElement(tr)
calcdoc.spreadsheet.addElement(table)
result = calcdoc.contentxml() # contentxml() is supposed to yield bytes
self.assertNotEqual(-1, result.find(b'''xmlns:ooo="http://openoffice.org/2004/office"'''))
def test_form_controls(self):
odf.form.Button(id="Button")
odf.form.Checkbox(id="Checkbox")
odf.form.Combobox(id="Combobox")
odf.form.Date(id="Date")
odf.form.File(id="File")
odf.form.FixedText(id="FixedText")
odf.form.FormattedText(id="FormattedText")
odf.form.Frame(id="Frame")
odf.form.GenericControl(id="GenericControl")
odf.form.Grid(id="Grid")
odf.form.Hidden(id="Hidden")
odf.form.Image(id="Image")
odf.form.ImageFrame(id="ImageFrame")
odf.form.Listbox(id="Listbox")
odf.form.Number(id="Number")
odf.form.Password(id="Password")
odf.form.Radio(id="Radio")
odf.form.Text(id="Text")
odf.form.Textarea(id="Textarea")
odf.form.Time(id="Time")
odf.form.ValueRange(id="ValueRange")
if __name__ == '__main__':
unittest.main()
| 34.901099 | 107 | 0.652393 | [
"Apache-2.0"
] | 10088/hue | desktop/core/ext-py/odfpy-1.4.1/tests/testform.py | 3,177 | Python |
import os.path
import sys
from tf_pwa.config_loader import ConfigLoader
from tf_pwa.vis import draw_decay_struct
this_dir = os.path.dirname(__file__)
sys.path.insert(0, this_dir + "/..")
def main():
config = ConfigLoader("config.yml")
for i, dec in enumerate(config.get_decay()):
draw_decay_struct(
dec, filename="figure/fig_{}".format(i), format="png"
)
if __name__ == "__main__":
main()
| 21.666667 | 66 | 0.641758 | [
"MIT"
] | ReynLieu/tf-pwa | tutorials/examples/show_decay_chain.py | 455 | Python |
import os
import hashlib
from django.db import models
from sample.fields import Md5Field, Sha256Field
from scanworker.file import PickleableFileSample
from scaggr.settings import SAMPLE_UPLOAD_DIR, MAX_SHA256_DIRECTORY_DEPTH
def generate_hash_directories(hash_str):
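# Build a nested directory path from the first MAX_SHA256_DIRECTORY_DEPTH characters of the hash.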
return "/".join([d for d in hash_str[:MAX_SHA256_DIRECTORY_DEPTH]])
def get_hashed_filename_path_and_set_filename(instance, filename):
new_filename = instance.sha256
# todo confirm that this gets the proper upload dir off the instance
dir_path = "{0}{1}".format(instance.UPLOAD_DIR, generate_hash_directories(new_filename))
return os.path.join(dir_path, new_filename)
class AbstractFileSample(models.Model):
UPLOAD_DIR = SAMPLE_UPLOAD_DIR
DUPLICATE_MESSAGE = 'duplicateSHA256'
V_NAME_KEY = 'name'
V_SIZE_KEY = 'size'
V_HASH_KEY = 'hash'
V_ERROR_KEY = 'error'
V_URL_KEY = 'url'
V_MSG_KEY = 'msg'
md5 = Md5Field(max_length=Md5Field.length, null=False, unique=True)
sha256 = Sha256Field(max_length=Sha256Field.length, null=False, unique=True)
submission_time = models.DateTimeField(auto_now_add=True)
file = models.FileField(upload_to=get_hashed_filename_path_and_set_filename)
@property
def file_content(self):
# todo think about memory caching this
self.file.seek(0)
return self.file.read()
def file_hash(self):
return str(self.sha256)
@models.permalink
def get_absolute_url(self):
return ('sample-detail',(), {'slug' : self.sha256 })
@classmethod
def filename_hasher(cls, file_content):
return hashlib.sha256(file_content).hexdigest()
def get_pickleable_file(self):
return PickleableFileSample.file_object_factory(self.file)
def save(self, *args, **kwargs):
# make sure we do our required hashing before we save this thing
for hash_field_obj, model, direct, m2m in [self._meta.get_field_by_name('md5'),
self._meta.get_field_by_name('sha256')]:
if not getattr(self, hash_field_obj.name):
digest = hash_field_obj.hasher(self.file_content).hexdigest()
setattr(self, hash_field_obj.name, digest)
return super(AbstractFileSample, self).save(*args, **kwargs)
class Meta:
abstract = True
| 32.686567 | 89 | 0.755251 | [
"Apache-2.0"
] | scsich/phagescan | sample/abstract.py | 2,190 | Python |
""" Test the gym's code for configuring the DonkeyCar's camera settings.
"""
import os
import argparse
import gym
import gym_donkeycar
import numpy as np
import uuid
if __name__ == "__main__":
# Initialize the donkey environment
# where env_name is one of:
env_list = [
"donkey-warehouse-v0",
"donkey-generated-roads-v0",
"donkey-avc-sparkfun-v0",
"donkey-generated-track-v0",
"donkey-roboracingleague-track-v0",
"donkey-waveshare-v0"
]
parser = argparse.ArgumentParser(description='gym_test')
parser.add_argument('--sim', type=str, default="sim_path",
help='path to unity simulator. May be left at default if you would like to start the sim on your own.')
parser.add_argument('--port', type=int, default=9091,
help='port to use for websockets')
parser.add_argument('--env_name', type=str, default='donkey-warehouse-v0',
help='name of donkey sim environment', choices=env_list)
args = parser.parse_args()
#%% SET UP ENVIRONMENT
cam = (256,256,3)
conf = {"exe_path" : args.sim,
"host" : "127.0.0.1",
"port" : args.port,
"body_style" : "donkey",
"body_rgb" : (128, 128, 128),
"car_name" : "me",
"font_size" : 100,
"racer_name" : "test",
"country" : "USA",
"bio" : "I am test client",
"guid" : str(uuid.uuid4()),
"cam_resolution" : cam,
"img_w" : cam[0],
"img_h" : cam[1],
"img_d" : cam[2],
}
env = gym.make(args.env_name, conf=conf)
print( "Env cam size: {}".format( env.viewer.get_sensor_size() ) )
speed = 0.5
steer = 0.0
max_steer = 1.0
#%% PLAY
obv = env.reset()
for t in range(100):
action = np.array([steer,speed]) # drive straight with small speed
try:
obv, reward, done, info = env.step(action)
except Exception as ex:
print( "Exception: {}".format( ex ) )
if obv.shape != cam:
print( "Invalid Image size: {}".format( obv.shape ) )
elif t == 10:
print( "Actual camera size: {}".format( obv.shape ) )
if done or (info['hit'] is True):
obv = env.reset()
print( "Exiting d/h: {}/{}".format( done, info['hit'] ) )
break
env.close()
| 27.953488 | 129 | 0.553245 | [
"MIT"
] | vihank/gym-donkeycar | examples/test_cam_config.py | 2,404 | Python |
from src.pre_processing import Preprocessing
def identifyQuery(query):
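# Classify the query: "B" = boolean (AND/OR/NOT), "PR" = proximity ("w1 w2 /k"), "S" = single term, "PO" = positional phrase.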
q_l: str = query
if q_l.__contains__("AND") or q_l.__contains__("OR") or q_l.__contains__("NOT"):
return "B"
elif query.__contains__("/"):
return "PR"
elif len(q_l.split()) == 1:
return "S"
else:
return "PO"
def positionalSearch(query, stop_list, dict_book: dict):
pipe = Preprocessing()
pipe.stop_word = stop_list
tokens = pipe.tokenizer(query)
stems = pipe.stemmer(tokens)
# dict_book structure: {"word": {doc-ID: [], ...}, ...}
w1 = stems[0]
w2 = stems[1]
print(w1, w2)
if dict_book.__contains__(w1) and dict_book.__contains__(w2):
posting1: dict = dict_book.get(w1) # dict returned, {docID:[], ...}
posting2: dict = dict_book.get(w2) # without using get() and type defining to be set not dict
else:
return []
# len_posting1 = len(posting1)
# len_posting2 = len(posting2)
doc_list = []
# i was iterating on sets rather than its keys
for docI in posting1.keys():
for docJ in posting2.keys(): # iterates on documents
if docI == docJ:
# print(docI)
poslist1: list = posting1.get(docI) # returns a position list
poslist2: list = posting2.get(docJ)
match: bool = False
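# A phrase match requires w2 to occur exactly two positions after w1 in the same document.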
for pos1 in poslist1: # hilary
for pos2 in poslist2: # clinton
if pos2 - pos1 == 2:
doc_list.append(docI)
match = True
break
if match is True:
break
return doc_list
def positionalSingleSearch(query, stop_list, dict_book: dict):
pipe = Preprocessing()
pipe.stop_word = stop_list
tokens = pipe.tokenizer(query)
stems = pipe.stemmer(tokens)
# dict_book structure: {"word": {doc-ID: [], ...}, ...}
w1 = stems[0]
if dict_book.keys().__contains__(w1):
posting1: dict = dict_book.get(w1) # dict returned, {docID:[], ...}
return list(posting1.keys())
else:
return []
def proximitySearch(query, stop_list, dict_book, k):
pipe = Preprocessing()
pipe.stop_word = stop_list
tokens = pipe.tokenizer(query)
stems = pipe.stemmer(tokens)
w1 = stems[0]
w2 = stems[1]
if dict_book.__contains__(w1) and dict_book.__contains__(w2):
posting1: dict = dict_book.get(w1) # dict returned, {docID:[], ...}
posting2: dict = dict_book.get(w2)
else:
return []
# len_posting1 = len(posting1)
# len_posting2 = len(posting2)
doc_list = []
for docI in posting1.keys():
for docJ in posting2.keys():
if docI == docJ:
poslist1: list = posting1.get(docI)
poslist2: list = posting2.get(docJ)
match: bool = False
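# A proximity match requires w2 to occur exactly k+1 positions after w1.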
for pos1 in poslist1: # hilary
for pos2 in poslist2: # clinton
if pos2 - pos1 == k+1:
doc_list.append(docI)
match = True
break
if match is True:
break
return doc_list
def booleanSearch(query, stop_list, dict_book):
pipe = Preprocessing()
pipe.stop_word = stop_list
# print(query)
tokens = pipe.tokenizer(query)
stems = pipe.stemmer(tokens)
if dict_book.__contains__(stems):
posting: set = dict_book[stems]
else:
return []
doc_list = []
for i in posting:
doc_list.append(i)
return doc_list
| 28.106061 | 102 | 0.547439 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | thealifaisal/boolean-retrieval-model | src/search.py | 3,710 | Python |
test = { 'name': 'q3_b',
'points': 5,
'suites': [ { 'cases': [ { 'code': '>>> no_match in '
"list(['professor', 'engineer', "
"'scientist', 'cat'])\n"
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 45.076923 | 80 | 0.235495 | [
"Apache-2.0"
] | UCBerkeley-SCET/DataX-Berkeley | dataxHWSp2021/HW3-4_NLP/student/tests/q3_b.py | 586 | Python |
#!/usr/bin/env python
#
# Copyright 2018 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a simple example demonstrating how to produce a message to
# Confluent Cloud then read it back again.
#
# https://www.confluent.io/confluent-cloud/
#
# Auto-creation of topics is disabled in Confluent Cloud. You will need to
# use the ccloud cli to create the python-test-topic topic before running this
# example.
#
# $ ccloud topic create python-test-topic
#
# The <ccloud bootstrap servers>, <ccloud key> and <ccloud secret> parameters
# are available via the Confluent Cloud web interface. For more information,
# refer to the quick-start:
#
# https://docs.confluent.io/current/cloud-quickstart.html
#
# to execute using Python 2.7:
# $ virtualenv ccloud_example
# $ source ccloud_example/bin/activate
# $ pip install confluent_kafka
# $ python confluent_cloud.py
# $ deactivate
#
# to execute using Python 3.x:
# $ python -m venv ccloud_example
# $ source ccloud_example/bin/activate
# $ pip install confluent_kafka
# $ python confluent_cloud.py
# $ deactivate
import uuid
from confluent_kafka import Producer, Consumer, KafkaError
p = Producer({
'bootstrap.servers': '<ccloud bootstrap servers>',
'broker.version.fallback': '0.10.0.0',
'api.version.fallback.ms': 0,
'sasl.mechanisms': 'PLAIN',
'security.protocol': 'SASL_SSL',
'sasl.username': '<ccloud key>',
'sasl.password': '<ccloud secret>'
})
def acked(err, msg):
"""Delivery report callback called (from flush()) on successful or failed delivery of the message."""
if err is not None:
print("failed to deliver message: {0}".format(err.str()))
else:
print("produced to: {0} [{1}] @ {2}".format(msg.topic(), msg.partition(), msg.offset()))
p.produce('python-test-topic', value='python test value', callback=acked)
# flush() is typically called when the producer is done sending messages to wait
# for outstanding messages to be transmitted to the broker and delivery report
# callbacks to get called. For continuous producing you should call p.poll(0)
# after each produce() call to trigger delivery report callbacks.
p.flush(10)
c = Consumer({
'bootstrap.servers': '<ccloud bootstrap servers>',
'broker.version.fallback': '0.10.0.0',
'api.version.fallback.ms': 0,
'sasl.mechanisms': 'PLAIN',
'security.protocol': 'SASL_SSL',
'sasl.username': '<ccloud key>',
'sasl.password': '<ccloud secret>',
'group.id': str(uuid.uuid1()), # this will create a new consumer group on each invocation.
'auto.offset.reset': 'earliest'
})
c.subscribe(['python-test-topic'])
try:
while True:
msg = c.poll(0.1) # Wait for message or event/error
if msg is None:
# No message available within timeout.
# Initial message consumption may take up to `session.timeout.ms` for
# the group to rebalance and start consuming
continue
elif not msg.error():
print('consumed: {0}'.format(msg.value()))
elif msg.error().code() == KafkaError._PARTITION_EOF:
print('end of partition: {0} [{1}] @ {2}'.format(msg.topic(), msg.partition(), msg.offset()))
else:
print('error: {0}'.format(msg.error().str()))
except KeyboardInterrupt:
pass
finally:
# Leave group and commit final offsets
c.close()
| 33.422414 | 105 | 0.689709 | [
"Apache-2.0"
] | RasmusWL/confluent-kafka-python | examples/confluent_cloud.py | 3,877 | Python |
'''Paginators for Figures
'''
from rest_framework.pagination import LimitOffsetPagination
class FiguresLimitOffsetPagination(LimitOffsetPagination):
'''Custom Figures paginator to make the number of records returned consistent
'''
default_limit = None
| 22.416667 | 81 | 0.784387 | [
"MIT"
] | TheMOOCAgency/figures | figures/pagination.py | 269 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack import *
class ScalapackBase(CMakePackage):
"""Base class for building ScaLAPACK, shared with the AMD optimized version
of the library in the 'amdscalapack' package.
"""
variant(
'build_type',
default='Release',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
variant(
'shared',
default=True,
description='Build the shared library version'
)
variant(
'pic',
default=False,
description='Build position independent code'
)
provides('scalapack')
depends_on('mpi')
depends_on('lapack')
depends_on('blas')
depends_on('cmake', when='@2.0.0:', type='build')
# See: https://github.com/Reference-ScaLAPACK/scalapack/issues/9
patch("cmake_fortran_mangle.patch", when='@2.0.2:2.0')
# See: https://github.com/Reference-ScaLAPACK/scalapack/pull/10
patch("mpi2-compatibility.patch", when='@2.0.2:2.0')
# See: https://github.com/Reference-ScaLAPACK/scalapack/pull/16
patch("int_overflow.patch", when='@2.0.0:2.1.0')
# See: https://github.com/Reference-ScaLAPACK/scalapack/pull/23
patch("gcc10-compatibility.patch", when='@2.0.0:2.1.0')
@property
def libs(self):
# Note that the default will be to search
# for 'libnetlib-scalapack.<suffix>'
shared = True if '+shared' in self.spec else False
return find_libraries(
'libscalapack', root=self.prefix, shared=shared, recursive=True
)
def cmake_args(self):
spec = self.spec
options = [
"-DBUILD_SHARED_LIBS:BOOL=%s" % ('ON' if '+shared' in spec else
'OFF'),
"-DBUILD_STATIC_LIBS:BOOL=%s" % ('OFF' if '+shared' in spec else
'ON')
]
# Make sure we use Spack's Lapack:
blas = spec['blas'].libs
lapack = spec['lapack'].libs
options.extend([
'-DLAPACK_FOUND=true',
'-DLAPACK_INCLUDE_DIRS=%s' % spec['lapack'].prefix.include,
'-DLAPACK_LIBRARIES=%s' % (lapack.joined(';')),
'-DBLAS_LIBRARIES=%s' % (blas.joined(';'))
])
c_flags = []
if '+pic' in spec:
c_flags.append(self.compiler.cc_pic_flag)
options.append(
"-DCMAKE_Fortran_FLAGS=%s" % self.compiler.fc_pic_flag
)
# Work around errors of the form:
# error: implicit declaration of function 'BI_smvcopy' is
# invalid in C99 [-Werror,-Wimplicit-function-declaration]
if spec.satisfies('%clang') or spec.satisfies('%apple-clang'):
c_flags.append('-Wno-error=implicit-function-declaration')
options.append(
self.define('CMAKE_C_FLAGS', ' '.join(c_flags))
)
return options
@run_after('install')
def fix_darwin_install(self):
# The shared libraries are not installed correctly on Darwin:
if (sys.platform == 'darwin') and ('+shared' in self.spec):
fix_darwin_install_name(self.spec.prefix.lib)
class NetlibScalapack(ScalapackBase):
"""ScaLAPACK is a library of high-performance linear algebra routines for
parallel distributed memory machines
"""
homepage = "https://www.netlib.org/scalapack/"
url = "https://www.netlib.org/scalapack/scalapack-2.0.2.tgz"
tags = ['e4s']
version('2.1.0', sha256='61d9216cf81d246944720cfce96255878a3f85dec13b9351f1fa0fd6768220a6')
version('2.0.2', sha256='0c74aeae690fe5ee4db7926f49c5d0bb69ce09eea75beb915e00bba07530395c')
version('2.0.1', sha256='a9b34278d4e10b40cbe084c6d87d09af8845e874250719bfbbc497b2a88bfde1')
version('2.0.0', sha256='e51fbd9c3ef3a0dbd81385b868e2355900148eea689bf915c5383d72daf73114')
# versions before 2.0.0 do not use cmake and require blacs as
# a separate package
| 35.364407 | 95 | 0.627366 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | Bambi/spack | var/spack/repos/builtin/packages/netlib-scalapack/package.py | 4,173 | Python |
"""
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pcluster_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from pcluster_client.model.cluster_info_summary import ClusterInfoSummary
globals()['ClusterInfoSummary'] = ClusterInfoSummary
class DeleteClusterResponseContent(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'cluster': (ClusterInfoSummary,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'cluster': 'cluster', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, cluster, *args, **kwargs): # noqa: E501
"""DeleteClusterResponseContent - a model defined in OpenAPI
Args:
cluster (ClusterInfoSummary):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.cluster = cluster
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 38.211429 | 110 | 0.589203 | [
"Apache-2.0"
] | Chen188/aws-parallelcluster | api/client/src/pcluster_client/model/delete_cluster_response_content.py | 6,687 | Python |
"""
ASGI config for achristos project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'achristos.settings')
application = get_asgi_application()
| 23.235294 | 78 | 0.787342 | [
"MIT"
] | n0tNoah/achristos | achristos/asgi.py | 395 | Python |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.pipeline.policies import ContentDecodePolicy
from azure.core.pipeline.policies import SansIOHTTPPolicy, HTTPPolicy
from ._models import TextDocumentBatchStatistics
class CognitiveServicesCredentialPolicy(SansIOHTTPPolicy):
def __init__(self, api_key_credential):
self.credential = api_key_credential
super(CognitiveServicesCredentialPolicy, self).__init__()
def on_request(self, request):
request.http_request.headers[
"Ocp-Apim-Subscription-Key"
] = self.credential.api_key
request.http_request.headers["X-BingApis-SDK-Client"] = "Python-SDK"
class TextAnalyticsResponseHook(HTTPPolicy):
def __init__(self, **kwargs):
self._response_callback = kwargs.get("response_hook")
super(TextAnalyticsResponseHook, self).__init__()
def send(self, request):
response_callback = request.context.options.pop("response_hook", self._response_callback)
if response_callback:
response = self.next.send(request)
data = ContentDecodePolicy.deserialize_from_http_generics(response.http_response)
statistics = data.get("statistics", None)
model_version = data.get("modelVersion", None)
batch_statistics = TextDocumentBatchStatistics._from_generated(statistics) # pylint: disable=protected-access
response.statistics = batch_statistics
response.model_version = model_version
response.raw_response = data
response_callback(response)
return response
return self.next.send(request)
| 40.386364 | 122 | 0.68655 | [
"MIT"
] | anuchandy/azure-sdk-for-python | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_policies.py | 1,777 | Python |
# Copyright (c) 2016 Tzutalin
# Create by TzuTaLin <[email protected]>
try:
from PyQt5.QtGui import QImage
except ImportError:
from PyQt4.QtGui import QImage
from base64 import b64encode, b64decode
from libs.pascal_voc_io import PascalVocWriter
from libs.pascal_voc_io import XML_EXT
import os.path
import sys
class LabelFileError(Exception):
pass
class LabelFile(object):
# It might be changed as window creates. By default, using XML ext
# suffix = '.lif'
suffix = XML_EXT
def __init__(self, filename=None):
self.shapes = ()
self.imagePath = None
self.imageData = None
self.verified = False
def savePascalVocFormat(self, filename, shapes, imagePath, imageData,
lineColor=None, fillColor=None, databaseSrc=None):
imgFolderPath = os.path.dirname(imagePath)
imgFolderName = os.path.split(imgFolderPath)[-1]
imgFileName = os.path.basename(imagePath)
#imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
# Read from file path because self.imageData might be empty if saving to
# Pascal format
image = QImage()
image.load(imagePath)
imageShape = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
writer = PascalVocWriter(imgFolderName, imgFileName,
imageShape, localImgPath=imagePath)
writer.verified = self.verified
for shape in shapes:
points = shape['points']
label = shape['label']
# Add Chris
difficult = int(shape['difficult'])
bndbox = LabelFile.convertPoints2BndBox(points)
writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, difficult)
writer.save(targetFile=filename)
return
def toggleVerify(self):
self.verified = not self.verified
''' ttf is disable
def load(self, filename):
import json
with open(filename, 'rb') as f:
data = json.load(f)
imagePath = data['imagePath']
imageData = b64decode(data['imageData'])
lineColor = data['lineColor']
fillColor = data['fillColor']
shapes = ((s['label'], s['points'], s['line_color'], s['fill_color'])\
for s in data['shapes'])
# Only replace data after everything is loaded.
self.shapes = shapes
self.imagePath = imagePath
self.imageData = imageData
self.lineColor = lineColor
self.fillColor = fillColor
def save(self, filename, shapes, imagePath, imageData, lineColor=None, fillColor=None):
import json
with open(filename, 'wb') as f:
json.dump(dict(
shapes=shapes,
lineColor=lineColor, fillColor=fillColor,
imagePath=imagePath,
imageData=b64encode(imageData)),
f, ensure_ascii=True, indent=2)
'''
@staticmethod
def isLabelFile(filename):
fileSuffix = os.path.splitext(filename)[1].lower()
return fileSuffix == LabelFile.suffix
@staticmethod
def convertPoints2BndBox(points):
xmin = float('inf')
ymin = float('inf')
xmax = float('-inf')
ymax = float('-inf')
for p in points:
x = p[0]
y = p[1]
xmin = min(x, xmin)
ymin = min(y, ymin)
xmax = max(x, xmax)
ymax = max(y, ymax)
# Martin Kersner, 2015/11/12
# 0-valued coordinates of BB caused an error while
# training faster-rcnn object detector.
if xmin < 1:
xmin = 1
if ymin < 1:
ymin = 1
return (int(xmin), int(ymin), int(xmax), int(ymax))
| 33.058824 | 91 | 0.571429 | [
"MIT"
] | DHZS/labelImg | libs/labelFile.py | 3,934 | Python |
"""
:author: Maikel Punie <[email protected]>
"""
import velbus
class VMB1BLModule(velbus.Module):
"""
Velbus 1-channel blind control module
"""
def __init__(self, module_type, module_name, module_address, controller):
velbus.Module.__init__(self, module_type, module_name, module_address, controller)
self._is_closed = {}
self._callbacks = {}
def is_closed(self, channel):
if channel in self._is_closed:
return self._is_closed[channel]
return False
def _load(self):
message = velbus.ModuleStatusRequestMessage(self._address)
message.channels = list(range(1, self.number_of_channels()+1))
self._controller.send(message)
def number_of_channels(self):
return 1
def _on_message(self, message):
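# Update the per-channel open/closed state from status messages and notify registered callbacks.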
if isinstance(message, velbus.PushButtonStatusMessage):
for channel in message.closed:
self._is_closed[channel] = True
for channel in message.opened:
self._is_closed[channel] = False
for channel in message.get_channels():
if channel in self._callbacks:
for callback in self._callbacks[channel]:
callback(self._is_closed[channel])
elif isinstance(message, velbus.ModuleStatusMessage):
for channel in list(range(1, self.number_of_channels() + 1)):
if channel in message.closed:
self._is_closed[channel] = True
else:
self._is_closed[channel] = False
def on_status_update(self, channel, callback):
"""
Callback to execute on a status update of the channel
"""
if not channel in self._callbacks:
self._callbacks[channel] = []
self._callbacks[channel].append(callback)
def get_categories(self, channel):
return ['binary_sensor']
class VMB2BLModule(VMB1BLModule):
"""
Velbus 2-channel blind control module
"""
def number_of_channels(self):
return 2
class VMB1BLEModule(velbus.Module):
def number_of_channels(self):
return 1
class VMB2BLEModule(velbus.Module):
def number_of_channels(self):
return 2
velbus.register_module('VMB1BL', VMB1BLModule)
velbus.register_module('VMB2BL', VMB2BLModule)
velbus.register_module('VMB1BLE', VMB1BLEModule)
velbus.register_module('VMB2BLE', VMB2BLEModule)
| 32.184211 | 90 | 0.641864 | [
"MIT"
] | ddanssaert/python-velbus | velbus/modules/vmbbl.py | 2,446 | Python |
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.shortcuts import render, redirect
# Create your views here.
from .forms import LoginForm, RegisterForm
User = get_user_model()
def register_view(request):
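# Validate the registration form, create the user and log them in; flag an error in the session on failure.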
form = RegisterForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get("username")
email = form.cleaned_data.get("email")
password = form.cleaned_data.get("password1")
password2 = form.cleaned_data.get("password2")
try:
user = User.objects.create_user(username, email, password)
except:
user = None
if user != None:
login(request, user)
return redirect("/")
else:
request.session['register_error'] = 1 # 1 == True
return render(request, "forms.html", {"form": form})
def login_view(request):
form = LoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
# authenticate() checks whether the username and password are correct
user = authenticate(request, username=username, password=password)
if user != None:
# User is valid and active -> is_active
# request.user == user
login(request, user)
# Login succes redirect
return redirect("/")
else:
# Count user login attempt (simple way)
# attempt = request.session.get("attempt") or 0
# request.session['attempt'] = attempt + 1
# return redirect("/invalid-password")
request.session['invalid_user'] = 1 # 1 == True
return render(request, "forms.html", {"form": form})
def logout_view(request):
logout(request)
# request.user == Anon user
return redirect("/login") | 34.927273 | 75 | 0.609058 | [
"MIT"
] | aklauritzen/django-bootcamp | accounts/views.py | 1,921 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class DescribeDrdsInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Drds', '2019-01-23', 'DescribeDrdsInstances','Drds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_Expired(self):
return self.get_query_params().get('Expired')
def set_Expired(self,Expired):
self.add_query_param('Expired',Expired)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Tags(self):
return self.get_query_params().get('Tag')
def set_Tags(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_Mix(self):
return self.get_query_params().get('Mix')
def set_Mix(self,Mix):
self.add_query_param('Mix',Mix) | 33.511905 | 89 | 0.738899 | [
"Apache-2.0"
] | ankitdobhal/aliyun-openapi-python-sdk | aliyun-python-sdk-drds/aliyunsdkdrds/request/v20190123/DescribeDrdsInstancesRequest.py | 2,815 | Python |
default_app_config = "%s.apps.AppConfig" % __name__
__version__ = "0.2.1"
| 18.75 | 51 | 0.72 | [
"BSD-3-Clause"
] | bashu/django-fineuploader | fineuploader/__init__.py | 75 | Python |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from cli_common.log import get_logger
logger = get_logger(__name__)
WORKER_CHECKOUT = '/builds/worker/checkouts/gecko'
class AnalysisTask(object):
'''
An analysis CI task running on Taskcluster
'''
artifacts = []
valid_states = ('completed', 'failed')
def __init__(self, task_id, task_status):
self.id = task_id
assert 'task' in task_status, 'No task data for {}'.format(self.id)
assert 'status' in task_status, 'No status data for {}'.format(self.id)
self.task = task_status['task']
self.status = task_status['status']
@property
def run_id(self):
return self.status['runs'][-1]['runId']
@property
def name(self):
return self.task['metadata'].get('name', 'unknown')
@property
def state(self):
return self.status['state']
def load_artifacts(self, queue_service):
# Process only the supported final states
# as some tasks do not always have relevant output
if self.state not in self.valid_states:
logger.warn('Invalid task state', state=self.state, id=self.id, name=self.name)
return
# Load relevant artifacts
out = {}
for artifact_name in self.artifacts:
logger.info('Load artifact', task_id=self.id, artifact=artifact_name)
try:
artifact = queue_service.getArtifact(self.id, self.run_id, artifact_name)
out[artifact_name] = 'response' in artifact and artifact['response'].content or artifact
except Exception as e:
logger.warn('Failed to read artifact', task_id=self.id, run_id=self.run_id, artifact=artifact_name, error=e)
continue
return out
def clean_path(self, path):
'''
Helper to clean issues path from remote tasks
'''
if path.startswith(WORKER_CHECKOUT):
path = path[len(WORKER_CHECKOUT):]
if path.startswith('/'):
path = path[1:]
return path
| 32.753623 | 124 | 0.623894 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Mozilla-GitHub-Standards/7a0517c85b685752ad36ce0e8246040e3de8d842fb0f2696540dfc0c54da847b | src/staticanalysis/bot/static_analysis_bot/task.py | 2,260 | Python |
import time
from array import array
from itertools import product
from time import clock
import sys
from java.lang import Math
sys.path.append("./ABAGAIL.jar")
import java.util.Random as Random
from shared import ConvergenceTrainer
from opt.example import FourPeaksEvaluationFunction
from opt.ga import DiscreteChangeOneMutation, SingleCrossOver
import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import dist.DiscretePermutationDistribution as DiscretePermutationDistribution
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
# Adapted from https://github.com/JonathanTay/CS-7641-assignment-2/blob/master/tsp.py
random = Random()
maxIters = [2, int(2e4+1)]
numTrials = 5
# Problem Sizes
N_list = [50, 100, 150, 200]
OUTPUT_DIRECTORY = "output"
outfile = OUTPUT_DIRECTORY + '/PEAKS4/{}/PEAKS4_{}_{}_LOG.csv'
# MIMIC
sample_list = [50, 100, 150, 200]
keepRate_list = [0.2, 0.3, 0.4, 0.5]
for t in range(numTrials):
for samples, keepRate, m, N in product([100], [0.2], [0.5], N_list):
fname = outfile.format('MIMIC', 'MIMIC_{}_{}'.format("problemSizes", N), str(t + 1))
with open(fname, 'w') as f:
f.write('iterations,fitness,time,fevals\n')
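# T is the threshold parameter of the Four Peaks problem, set here to one fifth of the bit-string length.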
T = N / 5
fill = [2] * N
ranges = array('i', fill)
keep = int(samples*keepRate)
ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
df = DiscreteDependencyTree(m, ranges)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
mimic = MIMIC(samples, keep, pop)
fit = ConvergenceTrainer(mimic)
times = [0]
for i in range(0, maxIters[0]):
start = clock()
fit.train()
elapsed = time.clock() - start
times.append(times[-1] + elapsed)
score = ef.value(mimic.getOptimal())
fevals = ef.fEvals
ef.fEvals -= 1
st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
print(st)
with open(fname, 'a') as f:
f.write(st)
# RHC
restart_list = [20, 40, 60, 80]
for t in range(numTrials):
for restart, N in product([80], N_list):
fname = outfile.format('RHC', 'RHC_{}_{}'.format("problemSize", N), str(t + 1))
with open(fname, 'w') as f:
f.write('iterations,fitness,time,fevals\n')
T = N / 5
fill = [2] * N
ranges = array('i', fill)
ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
rhc = RandomizedHillClimbing(hcp, restart)
fit = ConvergenceTrainer(rhc)
times = [0]
for i in range(0, maxIters[0]):
start = clock()
fit.train()
elapsed = time.clock() - start
times.append(times[-1] + elapsed)
score = ef.value(rhc.getOptimal())
fevals = ef.fEvals
ef.fEvals -= 1
st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
print(st)
with open(fname, 'a') as f:
f.write(st)
# SA
temperature_list = [1E1, 1E3, 1E5, 1E7, 1E9, 1E11]
CE_list = [0.35, 0.55, 0.75, 0.95]
for t in range(numTrials):
for temperature, CE, N in product([1E11], [0.35], N_list):
fname = outfile.format('SA', 'SA_{}_{}'.format("problemSizes", N), str(t + 1))
with open(fname, 'w') as f:
f.write('iterations,fitness,time,fevals\n')
T = N / 5
fill = [2] * N
ranges = array('i', fill)
ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
sa = SimulatedAnnealing(temperature, CE, hcp)
fit = ConvergenceTrainer(sa)
times = [0]
for i in range(0, maxIters[0]):
start = clock()
fit.train()
elapsed = time.clock() - start
times.append(times[-1] + elapsed)
score = ef.value(sa.getOptimal())
fevals = ef.fEvals
ef.fEvals -= 1
st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
print(st)
with open(fname, 'a') as f:
f.write(st)
# GA
mateRate_list = [0.2, 0.4, 0.6, 0.8]
mutateRate_list = [0.2, 0.4, 0.6, 0.8]
for t in range(numTrials):
for pop, mateRate, mutateRate, N in product([700], [0.2], [0.6], N_list):
fname = outfile.format('GA', 'GA_{}_{}'.format("problemSizes", N), str(t + 1))
with open(fname, 'w') as f:
f.write('iterations,fitness,time,fevals\n')
mate = int(pop*mateRate)
mutate = int(pop*mutateRate)
T = N / 5
fill = [2] * N
ranges = array('i', fill)
ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
ga = StandardGeneticAlgorithm(pop, mate, mutate, gap)
fit = ConvergenceTrainer(ga)
times = [0]
for i in range(0, maxIters[0]):
start = clock()
fit.train()
elapsed = time.clock() - start
times.append(times[-1] + elapsed)
score = ef.value(ga.getOptimal())
fevals = ef.fEvals
ef.fEvals -= 1
st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
print(st)
with open(fname, 'a') as f:
f.write(st)
| 34.214286 | 98 | 0.600128 | [
"BSD-3-Clause"
] | linqiao710/cs7641_a2_shared | jython/peaks4.py | 6,227 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks for use with GTFlow Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.summary_io import SummaryWriterCache
class FeatureImportanceSummarySaver(session_run_hook.SessionRunHook):
"""Hook to save feature importance summaries."""
def __init__(self, model_dir, every_n_steps=1):
"""Create a FeatureImportanceSummarySaver Hook.
This hook creates scalar summaries representing feature importance
for each feature column during training.
Args:
model_dir: model base output directory.
every_n_steps: frequency, in number of steps, for logging summaries.
Raises:
ValueError: If one of the arguments is invalid.
"""
if model_dir is None:
raise ValueError("model dir must be specified.")
self._model_dir = model_dir
self._every_n_steps = every_n_steps
self._last_triggered_step = None
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use FeatureImportanceSummarySaver.")
graph = ops.get_default_graph()
self._feature_names_tensor = graph.get_tensor_by_name(
"gbdt/feature_names:0")
self._feature_usage_counts_tensor = graph.get_tensor_by_name(
"gbdt/feature_usage_counts:0")
self._feature_gains_tensor = graph.get_tensor_by_name(
"gbdt/feature_gains:0")
def before_run(self, run_context):
del run_context # Unused by feature importance summary saver hook.
requests = {
"global_step": self._global_step_tensor,
"feature_names": self._feature_names_tensor,
"feature_usage_counts": self._feature_usage_counts_tensor,
"feature_gains": self._feature_gains_tensor
}
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
del run_context # Unused by feature importance summary saver hook.
# Read result tensors.
global_step = run_values.results["global_step"]
feature_names = run_values.results["feature_names"]
feature_usage_counts = run_values.results["feature_usage_counts"]
feature_gains = run_values.results["feature_gains"]
# Ensure summaries are logged at desired frequency
if (self._last_triggered_step is not None and
global_step < self._last_triggered_step + self._every_n_steps):
return
# Validate tensors.
if (len(feature_names) != len(feature_usage_counts) or
len(feature_names) != len(feature_gains)):
raise RuntimeError(
"Feature names and importance measures have inconsistent lengths.")
# Compute total usage.
total_usage_count = 0.0
for usage_count in feature_usage_counts:
total_usage_count += usage_count
usage_count_norm = 1.0 / total_usage_count if total_usage_count else 1.0
# Compute total gain.
total_gain = 0.0
for gain in feature_gains:
total_gain += gain
gain_norm = 1.0 / total_gain if total_gain else 1.0
# Output summary for each feature.
self._last_triggered_step = global_step
for (name, usage_count, gain) in zip(feature_names, feature_usage_counts,
feature_gains):
output_dir = os.path.join(self._model_dir, name.decode("utf-8"))
summary_writer = SummaryWriterCache.get(output_dir)
usage_count_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_counts", simple_value=usage_count)
])
usage_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_fraction",
simple_value=usage_count * usage_count_norm)
])
summary_writer.add_summary(usage_count_summary, global_step)
summary_writer.add_summary(usage_fraction_summary, global_step)
gains_summary = Summary(value=[
Summary.Value(tag="feature_importance/gains", simple_value=gain)
])
gains_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/gains_fraction",
simple_value=gain * gain_norm)
])
summary_writer.add_summary(gains_summary, global_step)
summary_writer.add_summary(gains_fraction_summary, global_step)
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs feed_fn and sets the feed_dict accordingly."""
def __init__(self, feed_fn):
self.feed_fn = feed_fn
def before_run(self, run_context):
del run_context # unused by FeedFnHook.
return session_run_hook.SessionRunArgs(fetches=None, feed_dict=self.feed_fn)
class StopAfterNTrees(session_run_hook.SessionRunHook):
"""Stop training after building N full trees."""
def __init__(self, n, num_attempted_trees_tensor, num_finalized_trees_tensor):
self._num_trees = n
# num_attempted_trees_tensor and num_finalized_trees_tensor are both
# tensors.
self._num_attempted_trees_tensor = num_attempted_trees_tensor
self._num_finalized_trees_tensor = num_finalized_trees_tensor
def before_run(self, run_context):
del run_context # unused by StopTrainingAfterNTrees.
return session_run_hook.SessionRunArgs({
"num_attempted_trees": self._num_attempted_trees_tensor,
"num_finalized_trees": self._num_finalized_trees_tensor,
})
def after_run(self, run_context, run_values):
num_attempted_trees = run_values.results["num_attempted_trees"]
num_finalized_trees = run_values.results["num_finalized_trees"]
assert num_attempted_trees is not None
assert num_finalized_trees is not None
# Stop when the required number of finalized trees is reached, or when we
# try enough times to build a tree but keep failing.
if (num_finalized_trees >= self._num_trees or
num_attempted_trees > 2 * self._num_trees):
logging.info("Requesting stop since we have reached %d trees.",
num_finalized_trees)
run_context.request_stop()
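# Hedged usage note (added for illustration, not from the original source):
# the hooks above implement the standard tf.train.SessionRunHook interface,
# so they are attached by passing them in the `hooks` list of an Estimator's
# train() call or of a MonitoredSession, e.g.
# `estimator.train(input_fn, hooks=[FeedFnHook(my_feed_fn)])`, where
# `my_feed_fn` is an assumed user-supplied callable returning a feed_dict.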
| 40.05618 | 81 | 0.731136 | [
"Apache-2.0"
] | 252125889/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/trainer_hooks.py | 7,130 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta2DeploymentStrategy(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rolling_update': 'V1beta2RollingUpdateDeployment',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeploymentStrategy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
"""Gets the rolling_update of this V1beta2DeploymentStrategy. # noqa: E501
:return: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501
:rtype: V1beta2RollingUpdateDeployment
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""Sets the rolling_update of this V1beta2DeploymentStrategy.
:param rolling_update: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501
:type: V1beta2RollingUpdateDeployment
"""
self._rolling_update = rolling_update
@property
def type(self):
"""Gets the type of this V1beta2DeploymentStrategy. # noqa: E501
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate. # noqa: E501
:return: The type of this V1beta2DeploymentStrategy. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1beta2DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate. # noqa: E501
:param type: The type of this V1beta2DeploymentStrategy. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeploymentStrategy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeploymentStrategy):
return True
return self.to_dict() != other.to_dict()
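# Hedged usage sketch (illustration only; this module is auto-generated and
# the example below is not part of it). The model round-trips to a plain dict:
#
#   strategy = V1beta2DeploymentStrategy(type="RollingUpdate")
#   strategy.to_dict()   # {'rolling_update': None, 'type': 'RollingUpdate'}
#   strategy == V1beta2DeploymentStrategy(type="RollingUpdate")   # True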
| 30.228188 | 124 | 0.60857 | [
"Apache-2.0"
] | playground-julia/kubernetes_asyncio | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | 4,504 | Python |
from requests.exceptions import TooManyRedirects, ConnectionError
from raccoon_src.utils.web_server_validator import WebServerValidator
from raccoon_src.utils.exceptions import WAFException, WebServerValidatorException
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.utils.logger import Logger
SERVER = "Server"
class WAFApplicationMethods:
@classmethod
def detect_cloudfront(cls, res):
service = "CloudFront"
waf_headers = ("Via", "X-cache")
if any(h in res.headers.keys() for h in waf_headers) and any(service.lower() in val for val in res.headers.values()):
return True
if res.headers.get(SERVER) == service:
return True
return
@classmethod
def detect_incapsula(cls, res):
if "X-Iinfo" in res.headers.keys() or res.headers.get("X-CDN") == "Incapsula":
return True
return
@classmethod
def detect_distil(cls, res):
if res.headers.get("x-distil-cs"):
return True
return
@classmethod
def detect_cloudflare(cls, res):
if "CF-RAY" in res.headers.keys() or res.headers.get(SERVER) == "cloudflare":
return True
return
@classmethod
def detect_edgecast(cls, res):
if SERVER in res.headers.keys() and "ECD" in res.headers[SERVER]:
return True
return
@classmethod
def detect_maxcdn(cls, res):
if SERVER in res.headers.keys() and "NetDNA-cache" in res.headers[SERVER]:
return True
return
@classmethod
def detect_sucuri(cls, res):
if any((
res.headers.get(SERVER) == "Sucuri/Cloudproxy",
"X-Sucuri-ID" in res.headers.keys(),
"X-Sucuri-Cache"in res.headers.keys(),
"Access Denied - Sucuri Website Firewall" in res.text)):
return True
return
@classmethod
def detect_reblaze(cls, res):
if res.headers.get(SERVER) == "Reblaze Secure Web Gateway" or res.cookies.get("rbzid"):
return True
return
class WAF:
def __init__(self, host):
self.host = host
self.cnames = host.dns_results.get('CNAME')
self.request_handler = RequestHandler()
self.web_server_validator = WebServerValidator()
self.waf_present = False
self.waf_cname_map = {
"incapdns": "Incapsula",
"edgekey": "Akamai",
"akamai": "Akamai",
"edgesuite": "Akamai",
"distil": "Distil Networks",
"cloudfront": "CloudFront",
"netdna-cdn": "MaxCDN"
}
self.waf_app_method_map = {
"CloudFront": WAFApplicationMethods.detect_cloudfront,
"Cloudflare": WAFApplicationMethods.detect_cloudflare,
"Incapsula": WAFApplicationMethods.detect_incapsula,
"MaxCDN": WAFApplicationMethods.detect_maxcdn,
"Edgecast": WAFApplicationMethods.detect_edgecast,
"Distil Networks": WAFApplicationMethods.detect_distil,
"Sucuri": WAFApplicationMethods.detect_sucuri,
"Reblaze": WAFApplicationMethods.detect_reblaze
}
log_file = HelpUtilities.get_output_path("{}/WAF.txt".format(self.host.target))
self.logger = Logger(log_file)
def _waf_detected(self, name):
self.logger.info(
"{} Detected WAF presence in web application: {}{}{}".format(
COLORED_COMBOS.BAD, COLOR.RED, name, COLOR.RESET))
self.waf_present = True
def _detect_by_cname(self):
for waf in self.waf_cname_map:
if any(waf in str(cname) for cname in self.cnames):
self.logger.info("{} Detected WAF presence in CNAME: {}{}{}".format(
COLORED_COMBOS.BAD, COLOR.RED, self.waf_cname_map.get(waf), COLOR.RESET)
)
self.waf_present = True
async def _detect_by_application(self):
try:
session = self.request_handler.get_new_session()
response = session.get(
timeout=20,
allow_redirects=True,
url="{}://{}:{}".format(
self.host.protocol,
self.host.target,
self.host.port
)
)
for waf, method in self.waf_app_method_map.items():
result = method(response)
if result:
self._waf_detected(waf)
except (ConnectionError, TooManyRedirects) as e:
raise WAFException("Couldn't get response from server.\n"
"Caused due to exception: {}".format(str(e)))
async def detect(self):
self.logger.info("{} Trying to detect WAF presence in {}".format(COLORED_COMBOS.INFO, self.host))
if self.cnames:
self._detect_by_cname()
try:
self.web_server_validator.validate_target_webserver(self.host)
await self._detect_by_application()
if not self.waf_present:
self.logger.info("{} Did not detect WAF presence in target".format(COLORED_COMBOS.GOOD))
except WebServerValidatorException:
self.logger.info(
"{} Target does not seem to have an active web server on port {}. "
"No WAF could be detected on an application level.".format(COLORED_COMBOS.NOTIFY, self.host.port)) | 37.34 | 125 | 0.605428 | [
"MIT"
] | IraqNoPhobia/Raccoon | raccoon_src/lib/waf.py | 5,601 | Python |
"""Tests for http/wsgi.py"""
import io
import asyncio
import socket
import unittest
from unittest import mock
import aiohttp
from aiohttp import multidict
from aiohttp import wsgi
from aiohttp import protocol
from aiohttp import helpers
class TestHttpWsgiServerProtocol(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.wsgi = mock.Mock()
self.reader = mock.Mock()
self.writer = mock.Mock()
self.writer.drain.return_value = ()
self.transport = mock.Mock()
self.transport.get_extra_info.side_effect = [
mock.Mock(family=socket.AF_INET),
('1.2.3.4', 1234),
('2.3.4.5', 80)]
self.headers = multidict.MultiDict({"HOST": "python.org"})
self.raw_headers = [(b"HOST", b"python.org")]
self.message = protocol.RawRequestMessage(
'GET', '/path', (1, 0), self.headers, self.raw_headers,
True, 'deflate')
self.payload = aiohttp.FlowControlDataQueue(self.reader)
self.payload.feed_data(b'data', 4)
self.payload.feed_data(b'data', 4)
self.payload.feed_eof()
def tearDown(self):
self.loop.close()
def test_ctor(self):
srv = wsgi.WSGIServerHttpProtocol(self.wsgi, loop=self.loop)
self.assertIs(srv.wsgi, self.wsgi)
self.assertFalse(srv.readpayload)
def _make_one(self, **kw):
srv = wsgi.WSGIServerHttpProtocol(self.wsgi, loop=self.loop, **kw)
srv.reader = self.reader
srv.writer = self.writer
srv.transport = self.transport
return srv.create_wsgi_environ(self.message, self.payload)
def _make_srv(self, app=None, **kw):
if app is None:
app = self.wsgi
srv = wsgi.WSGIServerHttpProtocol(app, loop=self.loop, **kw)
srv.reader = self.reader
srv.writer = self.writer
srv.transport = self.transport
return srv
def test_environ(self):
environ = self._make_one()
self.assertEqual(environ['RAW_URI'], '/path')
self.assertEqual(environ['wsgi.async'], True)
def test_environ_headers(self):
self.headers.extend(
(('SCRIPT_NAME', 'script'),
('CONTENT-TYPE', 'text/plain'),
('CONTENT-LENGTH', '209'),
('X_TEST', '123'),
('X_TEST', '456')))
environ = self._make_one()
self.assertEqual(environ['CONTENT_TYPE'], 'text/plain')
self.assertEqual(environ['CONTENT_LENGTH'], '209')
self.assertEqual(environ['HTTP_X_TEST'], '123,456')
self.assertEqual(environ['SCRIPT_NAME'], 'script')
self.assertEqual(environ['SERVER_NAME'], 'python.org')
self.assertEqual(environ['SERVER_PORT'], '80')
get_extra_info_calls = self.transport.get_extra_info.mock_calls
expected_calls = [
mock.call('socket'),
mock.call('peername'),
]
self.assertEqual(expected_calls, get_extra_info_calls)
def test_environ_host_header_alternate_port(self):
self.headers.update({'HOST': 'example.com:9999'})
environ = self._make_one()
self.assertEqual(environ['SERVER_PORT'], '9999')
def test_environ_host_header_alternate_port_ssl(self):
self.headers.update({'HOST': 'example.com:9999'})
environ = self._make_one(is_ssl=True)
self.assertEqual(environ['SERVER_PORT'], '9999')
def test_wsgi_response(self):
srv = self._make_srv()
resp = srv.create_wsgi_response(self.message)
self.assertIsInstance(resp, wsgi.WsgiResponse)
def test_wsgi_response_start_response(self):
srv = self._make_srv()
resp = srv.create_wsgi_response(self.message)
resp.start_response(
'200 OK', [('CONTENT-TYPE', 'text/plain')])
self.assertEqual(resp.status, '200 OK')
self.assertIsInstance(resp.response, protocol.Response)
def test_wsgi_response_start_response_exc(self):
srv = self._make_srv()
resp = srv.create_wsgi_response(self.message)
resp.start_response(
'200 OK', [('CONTENT-TYPE', 'text/plain')], ['', ValueError()])
self.assertEqual(resp.status, '200 OK')
self.assertIsInstance(resp.response, protocol.Response)
def test_wsgi_response_start_response_exc_status(self):
srv = self._make_srv()
resp = srv.create_wsgi_response(self.message)
resp.start_response('200 OK', [('CONTENT-TYPE', 'text/plain')])
self.assertRaises(
ValueError,
resp.start_response,
'500 Err', [('CONTENT-TYPE', 'text/plain')], ['', ValueError()])
@mock.patch('aiohttp.wsgi.aiohttp')
def test_wsgi_response_101_upgrade_to_websocket(self, m_asyncio):
srv = self._make_srv()
resp = srv.create_wsgi_response(self.message)
resp.start_response(
'101 Switching Protocols', (('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade')))
self.assertEqual(resp.status, '101 Switching Protocols')
self.assertTrue(m_asyncio.Response.return_value.send_headers.called)
def test_file_wrapper(self):
fobj = io.BytesIO(b'data')
wrapper = wsgi.FileWrapper(fobj, 2)
self.assertIs(wrapper, iter(wrapper))
self.assertTrue(hasattr(wrapper, 'close'))
self.assertEqual(next(wrapper), b'da')
self.assertEqual(next(wrapper), b'ta')
self.assertRaises(StopIteration, next, wrapper)
wrapper = wsgi.FileWrapper(b'data', 2)
self.assertFalse(hasattr(wrapper, 'close'))
def test_handle_request_futures(self):
def wsgi_app(env, start):
start('200 OK', [('Content-Type', 'text/plain')])
f1 = helpers.create_future(self.loop)
f1.set_result(b'data')
fut = helpers.create_future(self.loop)
fut.set_result([f1])
return fut
srv = self._make_srv(wsgi_app)
self.loop.run_until_complete(
srv.handle_request(self.message, self.payload))
content = b''.join(
[c[1][0] for c in self.writer.write.mock_calls])
self.assertTrue(content.startswith(b'HTTP/1.0 200 OK'))
self.assertTrue(content.endswith(b'data'))
def test_handle_request_simple(self):
def wsgi_app(env, start):
start('200 OK', [('Content-Type', 'text/plain')])
return [b'data']
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'data')
stream.feed_eof()
self.message = protocol.RawRequestMessage(
'GET', '/path', (1, 1), self.headers, self.raw_headers,
True, 'deflate')
srv = self._make_srv(wsgi_app, readpayload=True)
self.loop.run_until_complete(
srv.handle_request(self.message, self.payload))
content = b''.join(
[c[1][0] for c in self.writer.write.mock_calls])
self.assertTrue(content.startswith(b'HTTP/1.1 200 OK'))
self.assertTrue(content.endswith(b'data\r\n0\r\n\r\n'))
self.assertFalse(srv._keep_alive)
def test_handle_request_io(self):
def wsgi_app(env, start):
start('200 OK', [('Content-Type', 'text/plain')])
return io.BytesIO(b'data')
srv = self._make_srv(wsgi_app)
self.loop.run_until_complete(
srv.handle_request(self.message, self.payload))
content = b''.join(
[c[1][0] for c in self.writer.write.mock_calls])
self.assertTrue(content.startswith(b'HTTP/1.0 200 OK'))
self.assertTrue(content.endswith(b'data'))
def test_handle_request_keep_alive(self):
def wsgi_app(env, start):
start('200 OK', [('Content-Type', 'text/plain')])
return [b'data']
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'data')
stream.feed_eof()
self.message = protocol.RawRequestMessage(
'GET', '/path', (1, 1), self.headers, self.raw_headers,
False, 'deflate')
srv = self._make_srv(wsgi_app, readpayload=True)
self.loop.run_until_complete(
srv.handle_request(self.message, self.payload))
content = b''.join(
[c[1][0] for c in self.writer.write.mock_calls])
self.assertTrue(content.startswith(b'HTTP/1.1 200 OK'))
self.assertTrue(content.endswith(b'data\r\n0\r\n\r\n'))
self.assertTrue(srv._keep_alive)
def test_handle_request_readpayload(self):
def wsgi_app(env, start):
start('200 OK', [('Content-Type', 'text/plain')])
return [env['wsgi.input'].read()]
srv = self._make_srv(wsgi_app, readpayload=True)
self.loop.run_until_complete(
srv.handle_request(self.message, self.payload))
content = b''.join(
[c[1][0] for c in self.writer.write.mock_calls])
self.assertTrue(content.startswith(b'HTTP/1.0 200 OK'))
self.assertTrue(content.endswith(b'data'))
def test_dont_unquote_environ_path_info(self):
path = '/path/some%20text'
self.message = protocol.RawRequestMessage(
'GET', path, (1, 0), self.headers, self.raw_headers,
True, 'deflate')
environ = self._make_one()
self.assertEqual(environ['PATH_INFO'], path)
def test_authorization(self):
        # This header should be removed according to CGI/1.1 and WSGI, but
        # in our case basic auth is not handled by the server, so it should
        # not be removed.
self.headers.extend({'AUTHORIZATION': 'spam'})
self.message = protocol.RawRequestMessage(
'GET', '/', (1, 1), self.headers, self.raw_headers,
True, 'deflate')
environ = self._make_one()
self.assertEqual('spam', environ['HTTP_AUTHORIZATION'])
def test_http_1_0_no_host(self):
headers = multidict.MultiDict({})
self.message = protocol.RawRequestMessage(
'GET', '/', (1, 0), headers, [], True, 'deflate')
environ = self._make_one()
self.assertEqual(environ['SERVER_NAME'], '2.3.4.5')
self.assertEqual(environ['SERVER_PORT'], '80')
def test_family_inet6(self):
self.transport.get_extra_info.side_effect = [
mock.Mock(family=socket.AF_INET6),
("::", 1122, 0, 0),
('2.3.4.5', 80)]
self.message = protocol.RawRequestMessage(
'GET', '/', (1, 0), self.headers, self.raw_headers,
True, 'deflate')
environ = self._make_one()
self.assertEqual(environ['SERVER_NAME'], 'python.org')
self.assertEqual(environ['SERVER_PORT'], '80')
self.assertEqual(environ['REMOTE_ADDR'], '::')
self.assertEqual(environ['REMOTE_PORT'], '1122')
def test_family_unix(self):
if not hasattr(socket, "AF_UNIX"):
self.skipTest("No UNIX address family. (Windows?)")
self.transport.get_extra_info.side_effect = [
mock.Mock(family=socket.AF_UNIX)]
headers = multidict.MultiDict({
'SERVER_NAME': '1.2.3.4', 'SERVER_PORT': '5678',
'REMOTE_ADDR': '4.3.2.1', 'REMOTE_PORT': '8765'})
self.message = protocol.RawRequestMessage(
'GET', '/', (1, 0), headers, self.raw_headers, True, 'deflate')
environ = self._make_one()
self.assertEqual(environ['SERVER_NAME'], '1.2.3.4')
self.assertEqual(environ['SERVER_PORT'], '5678')
self.assertEqual(environ['REMOTE_ADDR'], '4.3.2.1')
self.assertEqual(environ['REMOTE_PORT'], '8765')
| 37.512821 | 76 | 0.61338 | [
"Apache-2.0"
] | tumb1er/aiohttp | tests/test_wsgi.py | 11,704 | Python |
from django.shortcuts import render
| 13 | 36 | 0.794872 | [
"MIT"
] | Alshak/rdm | rdm/wrappers/interaction_views.py | 39 | Python |
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
| 29.2 | 64 | 0.767123 | [
"BSD-3-Clause"
] | brack3t/django-heythere | tests/compat.py | 146 | Python |
import os
from conans import ConanFile, CMake, tools
class XsimdTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
self.run(os.path.join("bin", "test_package"), run_environment=True)
| 21.352941 | 75 | 0.633609 | [
"MIT"
] | 0x8000-0000/conan-center-index | recipes/xsimd/all/test_package/conanfile.py | 363 | Python |
#!/usr/bin/python
import socket
import fcntl
import struct
import os
ip=socket.gethostbyname(socket.gethostname())
hostname=socket.gethostname()
#def get_ip_address(ifname):
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# return socket.inet_ntoa(fcntl.ioctl(
# s.fileno(),
# 0x8915, # SIOCGIFADDR
# struct.pack('256s', ifname[:15])
# )[20:24])
#ip=get_ip_address('ens192')
file = open("/root/inventory", "w")
file.write("[minicloud]\n")
file.write( hostname + " ansible_ssh_host=" + ip + " ansible_ssh_user=root ansible_ssh_port=22 ansible_ssh_pass=\"passw0rd\"\n")
file.close()
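# Illustration (derived from the write() calls above; not part of the original
# script): the generated /root/inventory file ends up looking like
#
#   [minicloud]
#   myhost ansible_ssh_host=10.0.0.5 ansible_ssh_user=root ansible_ssh_port=22 ansible_ssh_pass="passw0rd"
#
# where "myhost" and "10.0.0.5" stand in for the detected hostname and IP.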
| 23.222222 | 128 | 0.692185 | [
"Apache-2.0"
] | jmbarros/minicloud-auto | update_inventory.py | 627 | Python |
from django import forms
from vlabs import Config, AppManager
class VlabsForm(forms.Form):
def __init__(self, *args, **kwargs):
self.vlcg = Config()
self.market = self.vlcg.getmarket()
super(VlabsForm, self).__init__(*args, **kwargs)
self.k = None
self.nameoftheapp = None
def auth(self):
self.fields['username'] = forms.CharField(label='username')
self.fields['password'] = forms.CharField(widget=forms.PasswordInput, label='password')
def selprj(self, list, user):
PRJ_SEL = zip(tuple(list), tuple(list))
self.fields['prj'] = forms.ChoiceField(widget=forms.RadioSelect, label='Progetti attivi', choices=PRJ_SEL)
# self.fields['user'] = forms.CharField(widget=forms.HiddenInput(), label='user', initial=user)
def chprjtempl(self, value):
self.fields['prj'] = forms.ChoiceField(widget=forms.HiddenInput(), label='prj', initial=value)
def createapp(self):
i = []
a = self.vlcg.getmarket()
for j in range(0, len(a)):
i.append(j)
APP_SEL = zip(tuple(i), tuple(a))
self.fields['app'] = forms.ChoiceField(widget=forms.RadioSelect, label='app', choices=APP_SEL)
def createenv(self, inputvar):
c = inputvar['appindex']
for k in inputvar.keys():
self.fields[k] = forms.CharField(label=inputvar[k])
self.fields['nameoftheapp'] = forms.CharField(label='name of the app')
self.fields['appindex'] = forms.CharField(widget=forms.HiddenInput(), label='appindex', initial=c)
self.fields['pvc'] = forms.BooleanField(label='PVC', required=False, initial=False)
self.fields['space'] = forms.IntegerField(label='PVC Space', initial=1, min_value=1, max_value=10)
def deleteapp(self):
i = []
a = self.vlam.getrunning()
for j in range(0, len(a)):
i.append(j)
APP_SEL = zip(tuple(i), tuple(a))
self.fields['run'] = forms.ChoiceField(widget=forms.RadioSelect, label='run', choices=APP_SEL)
def chooseapp(self, value):
self.fields['app'] = forms.ChoiceField(widget=forms.HiddenInput(), label='app', initial=value)
    def updatevariables(self, oldvars):  ### from here
for i in range(0, len(oldvars)):
self.fields[oldvars[i].name] = forms.CharField(label=oldvars[i].name, initial=oldvars[i].value)
def setquotas(self, spec_hard):
for k in spec_hard:
self.fields[k] = forms.CharField(label=k, required=False, initial=spec_hard[k])
def setlimits(self, vars):
self.fields['namespace'] = forms.CharField(widget=forms.HiddenInput(), label='namespace', initial=vars)
self.fields['type'] = forms.CharField(widget=forms.HiddenInput(), label='type', initial='limits')
def updatelimits(self, vars):
for i in range (0, len(vars)):
self.fields[vars[i]['name']] = forms.BooleanField(label=vars[i]['name'], initial=False)
def createns(self):
#alphalower = RegexValidator(regex=r'^[a-z]*[a-z0-9\-\_]*[a-z]')
self.fields['namespacename'] = forms.CharField(label='Name', required=True)
'''
a = {'pods', 'requests.cpu', 'requests.memory', 'requests.ephemeral-storage', 'requests.storage', 'limits.cpu', 'limits.memory', 'limits.memory', 'limits.ephemeral-storage', 'configmaps', 'persistentvolumeclaims', 'replicationcontrollers', 'secrets', 'services'}
self.fields['pods'] = forms.CharField(label='pods', required=False, initial=actualvalues['pods'])
self.fields['requests.cpu'] = forms.CharField(label='requests.cpu', required=False, initial=actualvalues['requests.cpu'])
self.fields['requests.memory'] = forms.CharField(label='requests.memory', required=False, initial=actualvalues['requests.memory'])
self.fields['requests.ephemeral-storage'] = forms.CharField(label='requests.ephemeral-storage', required=False, initial=actualvalues['requests.ephemeral-storage'])
self.fields['requests.storage'] = forms.CharField(label='requests.storage', required=False, initial=actualvalues['requests.storage'])
self.fields['limits.cpu'] = forms.CharField(label='limits.cpu', required=False, initial=actualvalues['limits.cpu'])
self.fields['limits.memory'] = forms.CharField(label='limits.memory', required=False, initial=actualvalues['limits.memory'])
self.fields['limits.ephemeral-storage'] = forms.CharField(label='limits.ephemeral-storage', required=False, initial=actualvalues['limits.ephemeral-storage'])
self.fields['configmaps'] = forms.IntegerField(label='configmaps', required=False, initial=actualvalues['configmaps'])
self.fields['persistentvolumeclaims'] = forms.IntegerField(label='persistentvolumeclaims', required=False, initial=actualvalues['persistentvolumeclaims'])
self.fields['replicationcontrollers'] = forms.IntegerField(label='replicationcontrollers', required=False, initial=actualvalues['replicationcontrollers'])
self.fields['secrets'] = forms.IntegerField(label='secrets', required=False, initial=actualvalues['secrets'])
self.fields['services'] = forms.IntegerField(label='services', required=False, initial=actualvalues['services'])
'''
| 54 | 271 | 0.664048 | [
"MIT"
] | virtlabs/vlabs | webui/vlabs/vlabs/forms.py | 5,346 | Python |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Quantum gradient transforms are strategies for computing the gradient of a quantum
circuit that work by **transforming** the quantum circuit into one or more gradient circuits.
These gradient circuits, once executed and post-processed, return the gradient
of the original circuit.
Examples of quantum gradient transforms include finite-differences and parameter-shift
rules.
This module provides a selection of device-independent, differentiable quantum
gradient transforms. As such, these quantum gradient transforms can be used to
compute the gradients of quantum circuits on both simulators and hardware.
In addition, it also includes an API for writing your own quantum gradient
transforms.
These quantum gradient transforms can be used in two ways:
- Transforming quantum circuits directly
- Registering a quantum gradient strategy for use when performing autodifferentiation
with a :class:`QNode <pennylane.QNode>`.
Overview
--------
Gradient transforms
^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: api
finite_diff
param_shift
param_shift_cv
Custom gradients
^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: api
gradient_transform
Utility functions
^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: api
finite_diff_coeffs
generate_shifted_tapes
generate_shift_rule
generate_multi_shift_rule
eigvals_to_frequencies
compute_vjp
batch_vjp
vjp
Registering autodifferentiation gradients
-----------------------------------------
All PennyLane QNodes are automatically differentiable, and can be included
seamlessly within an autodiff pipeline. When creating a :class:`QNode <pennylane.QNode>`, the
strategy for determining the optimal differentiation strategy is *automated*,
and takes into account the circuit, device, autodiff framework, and metadata
(such as whether a finite number of shots are used).
.. code-block:: python
dev = qml.device("default.qubit", wires=2, shots=1000)
@qml.qnode(dev, interface="tf")
def circuit(weights):
...
In particular:
- When using a simulator device with exact measurement statistics, backpropagation
is preferred due to performance and memory improvements.
- When using a hardware device, or a simulator with a finite number of shots,
a quantum gradient transform---such as the parameter-shift rule---is preferred.
If you would like to specify a particular quantum gradient transform to use
when differentiating your quantum circuit, this can be passed when
creating the QNode:
.. code-block:: python
@qml.qnode(dev, gradient_fn=qml.gradients.param_shift)
def circuit(weights):
...
When using your preferred autodiff framework to compute the gradient of your
hybrid quantum-classical cost function, the specified gradient transform
for each QNode will be used.
.. note::
A single cost function may include multiple QNodes, each with their
own quantum gradient transform registered.
Transforming QNodes
-------------------
Alternatively, quantum gradient transforms can be applied manually to QNodes.
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(weights[2], wires=1)
return qml.probs(wires=1)
>>> weights = np.array([0.1, 0.2, 0.3], requires_grad=True)
>>> circuit(weights)
tensor([0.9658079, 0.0341921], requires_grad=True)
>>> qml.gradients.param_shift(circuit)(weights)
tensor([[-0.04673668, -0.09442394, -0.14409127],
[ 0.04673668, 0.09442394, 0.14409127]], requires_grad=True)
Comparing this to autodifferentiation:
>>> qml.grad(circuit)(weights)
array([[-0.04673668, -0.09442394, -0.14409127],
[ 0.04673668, 0.09442394, 0.14409127]])
Quantum gradient transforms can also be applied as decorators to QNodes,
if *only* gradient information is needed. Evaluating the QNode will then
automatically return the gradient:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.gradients.param_shift
@qml.qnode(dev)
def decorated_circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(weights[2], wires=1)
return qml.probs(wires=1)
>>> decorated_circuit(weights)
tensor([[-0.04673668, -0.09442394, -0.14409127],
[ 0.04673668, 0.09442394, 0.14409127]], requires_grad=True)
.. note::
If your circuit contains any operations not supported by the gradient
transform, the transform will attempt to automatically decompose the
circuit into only operations that support gradients.
.. note::
If you wish to only return the purely **quantum** component of the
gradient---that is, the gradient of the output with respect to
**gate** arguments, not QNode arguments---pass ``hybrid=False``
when applying the transform:
>>> qml.gradients.param_shift(circuit, hybrid=False)(weights)
Differentiating gradient transforms
-----------------------------------
Gradient transforms are themselves differentiable, allowing higher-order
gradients to be computed:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(weights[2], wires=1)
return qml.expval(qml.PauliZ(1))
>>> weights = np.array([0.1, 0.2, 0.3], requires_grad=True)
>>> circuit(weights)
tensor(0.9316158, requires_grad=True)
>>> qml.gradients.param_shift(circuit)(weights) # gradient
array([[-0.09347337, -0.18884787, -0.28818254]])
>>> qml.jacobian(qml.gradients.param_shift(circuit))(weights) # hessian
array([[[-0.9316158 , 0.01894799, 0.0289147 ],
[ 0.01894799, -0.9316158 , 0.05841749],
[ 0.0289147 , 0.05841749, -0.9316158 ]]])
Transforming tapes
------------------
Gradient transforms can be applied to low-level :class:`~.QuantumTape` objects,
a datastructure representing variational quantum algorithms:
.. code-block:: python
weights = np.array([0.1, 0.2, 0.3], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(weights[2], wires=1)
qml.expval(qml.PauliZ(1))
Unlike when transforming a QNode, transforming a tape directly
will perform no implicit quantum device evaluation. Instead, it returns
the processed tapes, and a post-processing function, which together
define the gradient:
>>> gradient_tapes, fn = qml.gradients.param_shift(tape)
>>> gradient_tapes
[<JacobianTape: wires=[0, 1], params=3>,
<JacobianTape: wires=[0, 1], params=3>,
<JacobianTape: wires=[0, 1], params=3>,
<JacobianTape: wires=[0, 1], params=3>,
<JacobianTape: wires=[0, 1], params=3>,
<JacobianTape: wires=[0, 1], params=3>]
This can be useful if the underlying circuits representing the gradient
computation need to be analyzed.
The output tapes can then be evaluated and post-processed to retrieve
the gradient:
>>> dev = qml.device("default.qubit", wires=2)
>>> fn(qml.execute(gradient_tapes, dev, None))
[[-0.09347337 -0.18884787 -0.28818254]]
Note that the post-processing function ``fn`` returned by the
gradient transform is applied to the flat list of results returned
from executing the gradient tapes.
Custom gradient transforms
--------------------------
Using the :class:`~.gradient_transform` decorator, custom gradient transforms
can be created:
.. code-block:: python
@gradient_transform
def my_custom_gradient(tape, **kwargs):
...
return gradient_tapes, processing_fn
Once created, a custom gradient transform can be applied directly
to QNodes, or registered as the quantum gradient transform to use
during autodifferentiation.
For more details, please see the :class:`~.gradient_transform`
documentation.
"""
import pennylane as qml
from . import finite_difference
from . import parameter_shift
from . import parameter_shift_cv
from .gradient_transform import gradient_transform
from .finite_difference import finite_diff, finite_diff_coeffs, generate_shifted_tapes
from .parameter_shift import param_shift
from .parameter_shift_cv import param_shift_cv
from .vjp import compute_vjp, batch_vjp, vjp
from .hamiltonian_grad import hamiltonian_grad
from .general_shift_rules import (
eigvals_to_frequencies,
generate_shift_rule,
generate_multi_shift_rule,
)
| 31.838926 | 94 | 0.697407 | [
"Apache-2.0"
] | AkashNarayanan/pennylane | pennylane/gradients/__init__.py | 9,488 | Python |
import pkg_resources
from anonlink import bloommatcher
from anonlink import entitymatch
from anonlink import network_flow
__version__ = pkg_resources.get_distribution('anonlink').version
__author__ = 'Stephen Hardy, Brian Thorne' | 28.875 | 64 | 0.848485 | [
"Apache-2.0"
] | luyang1210/PPRL | anonlink/__init__.py | 231 | Python |
import streamlit as st
import pandas as pd
from PIL import Image
import subprocess
import os
import base64
import pickle
# Molecular descriptor calculator
def desc_calc():
# Performs the descriptor calculation
bashCommand = "java -Xms2G -Xmx2G -Djava.awt.headless=true -jar ./PaDEL-Descriptor/PaDEL-Descriptor.jar -removesalt -standardizenitro -fingerprints -descriptortypes ./PaDEL-Descriptor/PubchemFingerprinter.xml -dir ./ -file descriptors_output.csv"
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
os.remove('molecule.smi')
# File download
def filedownload(df):
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions
href = f'<a href="data:file/csv;base64,{b64}" download="prediction.csv">Download Predictions</a>'
return href
# Model building
def build_model(input_data):
# Reads in saved regression model
load_model = pickle.load(open('acetylcholinesterase_model.pkl', 'rb'))
# Apply model to make predictions
prediction = load_model.predict(input_data)
st.header('**Prediction output**')
prediction_output = pd.Series(prediction, name='pIC50')
molecule_name = pd.Series(load_data[1], name='molecule_name')
df = pd.concat([molecule_name, prediction_output], axis=1)
st.write(df)
st.markdown(filedownload(df), unsafe_allow_html=True)
# Page title
st.markdown("""
# Bioactivity Prediction App
This app allows you to predict the bioactivity of molecules (provided as SMILES) towards inhibiting the target enzyme Acetylcholinesterase, using a QSAR model.
**Credits**
- App built in `Python` + `Streamlit`
- Descriptors calculated using [PaDEL-Descriptor](http://www.yapcwsoft.com/dd/padeldescriptor/) [[Read the Paper]](https://doi.org/10.1002/jcc.21707).
---
""")
# Sidebar
with st.sidebar.header('1. Upload your CSV data'):
uploaded_file = st.sidebar.file_uploader("Upload your input file", type=['txt'])
st.sidebar.markdown("""
[Example input file](https://raw.githubusercontent.com/dataprofessor/bioactivity-prediction-app/main/example_acetylcholinesterase.txt)
""")
if st.sidebar.button('Predict'):
load_data = pd.read_table(uploaded_file, sep=' ', header=None)
load_data.to_csv('molecule.smi', sep = '\t', header = False, index = False)
st.header('**Original input data**')
st.write(load_data)
with st.spinner("Calculating descriptors..."):
desc_calc()
# Read in calculated descriptors and display the dataframe
st.header('**Calculated molecular descriptors**')
desc = pd.read_csv('descriptors_output.csv')
st.write(desc)
st.write(desc.shape)
# Read descriptor list used in previously built model
st.header('**Subset of descriptors from previously built models**')
Xlist = list(pd.read_csv('descriptor_list.csv').columns)
desc_subset = desc[Xlist]
st.write(desc_subset)
st.write(desc_subset.shape)
# Apply trained model to make prediction on query compounds
build_model(desc_subset)
else:
st.info('Upload input data in the sidebar to start!')
| 37.783133 | 250 | 0.728954 | [
"MIT"
] | ferdinand-popp/BIDD | app.py | 3,136 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for transpose_conv."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
# Since computing the output_shape for the input_sizes argument of
# tf.nn.conv2d_transpose is fairly complicated, we first perform a "conv2d"
# operation to get an output, then feed that output into
# tf.nn.conv2d_backprop_input.
# This test therefore depends on the correctness of the "conv2d" operation.
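# For example (an illustrative walk-through added here, not extra test logic):
# with input_shape [1, 3, 4, 1], a 3x3 filter, strides [1, 1, 1, 1] and SAME
# padding, the forward conv2d produces a [1, 3, 4, channel_multiplier] tensor;
# feeding that tensor and the original input_shape into
# tf.nn.conv2d_backprop_input yields a transpose-convolution result whose
# shape is again [1, 3, 4, 1].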
@register_make_test_function()
def make_transpose_conv_tests(options):
"""Make a set of tests to do transpose_conv."""
# Tensorflow only supports equal strides
test_parameters = [
{
"input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"channel_multiplier": [1, 2],
"output_shape": [[]],
"fully_quantize": [False]
},
# TODO(yunluli): Adding simple tests for now to unblock edgetpu debugging.
# Need to add more test cases.
{
"input_shape": [[1, 3, 3, 1]],
"filter_size": [[3, 3, 2, 1]],
"strides": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
"output_shape": [[1, 3, 3, 2]],
"fully_quantize": [True]
},
{
"input_shape": [[1, 3, 3, 1]],
"filter_size": [[3, 3, 2, 1]],
"strides": [[1, 2, 2, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
"output_shape": [[1, 6, 6, 2]],
"fully_quantize": [True]
},
{
"input_shape": [[1, 4, 3, 1]],
"filter_size": [[3, 3, 2, 1]],
"strides": [[1, 2, 2, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
"output_shape": [[1, 8, 6, 2]],
"fully_quantize": [True]
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
if not parameters["fully_quantize"]:
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
return [input_shape, filter_size]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
filter_input = tf.compat.v1.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
if not parameters["fully_quantize"]:
input_tensors = [input_tensor, filter_input]
conv_outputs = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
out = tf.compat.v1.nn.conv2d_backprop_input(
input_shape,
filter_input,
conv_outputs,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
else:
input_tensors = [input_tensor]
filter_input = create_tensor_data(
np.float32, filter_shape, min_value=-1, max_value=1)
out = tf.nn.conv2d_transpose(
input_tensor,
filter_input,
parameters["output_shape"],
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_shape, filter_shape = get_tensor_shapes(parameters)
if not parameters["fully_quantize"]:
values = [
create_tensor_data(np.float32, input_shape),
create_tensor_data(np.float32, filter_shape)
]
else:
values = [
create_tensor_data(
np.float32, input_shape, min_value=-1, max_value=1),
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| 36.417808 | 80 | 0.61877 | [
"Apache-2.0"
] | 1250281649/tensorflow | tensorflow/lite/testing/op_tests/transpose_conv.py | 5,317 | Python |
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
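    # Hedged usage sketch (not part of the original source; ``User`` and
    # ``Address`` are assumed mapped classes): a filtered query can be
    # embedded as a subquery and joined against, e.g.
    #
    #   subq = session.query(User.id, User.name).filter(
    #       User.name.like('%ed%')).subquery(name='ed_users')
    #   session.query(Address).join(subq, Address.user_id == subq.c.id)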
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
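A brief sketch (``User`` is an assumed mapped class)::
# render the plain SELECT with no eager JOINs, e.g. for embedding
stmt = session.query(User).enable_eagerloads(False).statement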
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
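For illustration (``User`` is an assumed mapped class)::
# columns come out labeled users_id, users_name, etc.
stmt = session.query(User).with_labels().statement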
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
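An abbreviated sketch, assuming a joined-inheritance hierarchy of
``Employee``, ``Engineer`` and ``Manager`` (illustrative names)::
# load Engineer and Manager columns up front along with Employee rows
q = session.query(Employee).with_polymorphic([Engineer, Manager])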
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory, which is both time-consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by the
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect,
which will stream results using server-side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
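A brief sketch (``User`` and ``Address`` are assumed mapped classes)::
# correlated scalar subquery counting addresses per user
count_sq = session.query(func.count(Address.id)).\\
filter(Address.user_id == User.id).\\
correlate(User).as_scalar()
q = session.query(User.name, count_sq)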
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
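A usage sketch (``someuser`` stands in for an already-loaded ``User``
instance with an assumed ``addresses`` relationship)::
# Address rows that belong to the given User object
q = session.query(Address).with_parent(someuser, 'addresses')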
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
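A short sketch (``User`` is an assumed mapped class)::
# limit first, then select only the names from that inner SELECT
q = session.query(User).order_by(User.id).limit(10).from_self(User.name)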
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
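A typical sketch (``User.addresses`` is an assumed relationship)::
from sqlalchemy.orm import joinedload
# eagerly load the related collection in the same SELECT
q = session.query(User).options(joinedload(User.addresses))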
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
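A dialect-specific sketch (the index name below is illustrative)::
# MySQL index hint rendered next to the "users" table
q = session.query(User).\\
with_hint(User, 'USE INDEX (ix_users_name)', 'mysql')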
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
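For example, as a sketch (``User`` is an assumed mapped class)::
# request streaming of results where the DBAPI supports it
q = session.query(User).execution_options(stream_results=True)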
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
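A brief sketch using a :func:`.text` criterion (``User`` and the
``users`` table name are illustrative)::
# bind the value for the named parameter used in the text() filter
q = session.query(User).filter(text("users.name = :name"))
q = q.params(name='ed')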
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
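For example, as a sketch (``User`` and its columns are illustrative)::
q = session.query(User).order_by(User.fullname.desc(), User.id)
# cancel all ORDER BY criteria, including mapper-configured defaults
q = q.order_by(None)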
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*expr: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
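As a sketch (``User`` is an assumed mapped class)::
q = session.query(User.name).distinct()
# Postgresql only: renders DISTINCT ON (users.name)
q = session.query(User).distinct(User.name)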
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. This is particularly useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
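A usage sketch with :func:`~.expression.text` (``User`` and the
``users`` table name are illustrative)::
stmt = text("SELECT * FROM users WHERE name=:name")
q = session.query(User).from_statement(stmt).params(name='ed')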
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
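        # Let each query entity (mapped class, column expression or bundle)
        # contribute its columns, FROM objects and eager-join directives to
        # the compilation context.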
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
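        # Rewrite ORM-level column references so they target the aliased
        # inner SELECT rather than the original tables.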
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
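        # Translate the legacy with_lockmode() strings into the read/nowait
        # flags understood by the Core ForUpdateArg construct.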
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
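        # Run each child entity's row processor, then let the Bundle compose
        # the individual values into a single keyed result.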
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| 36.305412 | 84 | 0.594135 | [
"MIT"
] | slafs/sqlalchemy | lib/sqlalchemy/orm/query.py | 133,495 | Python |
#!/usr/bin/env python
# license removed for brevity
import rospy, tf, socket, sys, struct
from geometry_msgs.msg import PoseStamped
topic = "/mocap_client/ARBI/pose"
UDP_IP = "10.201.0.100"
UDP_PORT = 21444
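# Relay each PoseStamped received on the mocap topic as a single UDP datagram
# of seven packed doubles (x, y, z, qx, qy, qz, qw); the header sequence
# number is read below but not included in the datagram.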
def callback(data):
x = data.pose.position.x
y = data.pose.position.y
z = data.pose.position.z
qx = data.pose.orientation.x
qy = data.pose.orientation.y
qz = data.pose.orientation.z
qw = data.pose.orientation.w
timestamp = data.header.seq
MESSAGE = struct.pack('ddddddd',x,y,z,qx,qy,qz,qw)
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
def talker():
global sock
rospy.init_node('mocap_2_udp', anonymous=True)
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
rospy.Subscriber(topic, PoseStamped, callback)
rospy.spin()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| 21.431818 | 54 | 0.663839 | [
"MIT"
] | alfalcmar/grvc-utils | mocap_bridge/scripts/mocap2udp.py | 943 | Python |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest import skipUnless
import numpy as np
from numpy.testing import assert_array_equal
from parameterized import parameterized
from monai.apps.pathology.data import PatchWSIDataset
from monai.apps.utils import download_url
from monai.utils import optional_import
_cucim, has_cim = optional_import("cucim")
has_cim = has_cim and hasattr(_cucim, "CuImage")
_, has_osl = optional_import("openslide")
FILE_URL = "https://drive.google.com/uc?id=1sGTKZlJBIz53pfqTxoTqiIQzIoEzHLAe"
base_name, extension = FILE_URL.split("id=")[1], ".tiff"
FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + base_name + extension)
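# Each TEST_CASE_* entry pairs PatchWSIDataset constructor arguments with the
# patch images and labels expected from the downloaded test whole-slide image.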
TEST_CASE_0 = [
{
"data": [{"image": FILE_PATH, "location": [0, 0], "label": [1]}],
"region_size": (1, 1),
"grid_shape": (1, 1),
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[{"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}],
]
TEST_CASE_1 = [
{
"data": [{"image": FILE_PATH, "location": [10004, 20004], "label": [0, 0, 0, 1]}],
"region_size": (8, 8),
"grid_shape": (2, 2),
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[0]]])},
{"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[0]]])},
{"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])},
{"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])},
],
]
TEST_CASE_2 = [
{
"data": [{"image": FILE_PATH, "location": [0, 0], "label": [1]}],
"region_size": 1,
"grid_shape": 1,
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[{"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}],
]
TEST_CASE_3 = [
{
"data": [{"image": FILE_PATH, "location": [0, 0], "label": [[[0, 1], [1, 0]]]}],
"region_size": 1,
"grid_shape": 1,
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[{"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 1], [1, 0]]])}],
]
TEST_CASE_OPENSLIDE_0 = [
{
"data": [{"image": FILE_PATH, "location": [0, 0], "label": [1]}],
"region_size": (1, 1),
"grid_shape": (1, 1),
"patch_size": 1,
"image_reader_name": "OpenSlide",
},
[{"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}],
]
TEST_CASE_OPENSLIDE_1 = [
{
"data": [{"image": FILE_PATH, "location": [10004, 20004], "label": [0, 0, 0, 1]}],
"region_size": (8, 8),
"grid_shape": (2, 2),
"patch_size": 1,
"image_reader_name": "OpenSlide",
},
[
{"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[0]]])},
{"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[0]]])},
{"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])},
{"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])},
],
]
class TestPatchWSIDataset(unittest.TestCase):
def setUp(self):
download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f")
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
@skipUnless(has_cim, "Requires CuCIM")
def test_read_patches_cucim(self, input_parameters, expected):
dataset = PatchWSIDataset(**input_parameters)
samples = dataset[0]
for i in range(len(samples)):
self.assertTupleEqual(samples[i]["label"].shape, expected[i]["label"].shape)
self.assertTupleEqual(samples[i]["image"].shape, expected[i]["image"].shape)
self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"]))
self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"]))
@parameterized.expand([TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1])
@skipUnless(has_osl, "Requires OpenSlide")
def test_read_patches_openslide(self, input_parameters, expected):
dataset = PatchWSIDataset(**input_parameters)
samples = dataset[0]
for i in range(len(samples)):
self.assertTupleEqual(samples[i]["label"].shape, expected[i]["label"].shape)
self.assertTupleEqual(samples[i]["image"].shape, expected[i]["image"].shape)
self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"]))
self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"]))
if __name__ == "__main__":
unittest.main()
| 39.753623 | 110 | 0.594969 | [
"Apache-2.0"
] | LarcoScarsa/MONAI | tests/test_patch_wsi_dataset.py | 5,486 | Python |
from currency_exchanger.currencies.models import Currency
from currency_exchanger.wallets.models import Wallet
from django.db import models
class Stock(models.Model):
symbol = models.CharField(max_length=10)
currency = models.ForeignKey(Currency, on_delete=models.CASCADE, related_name="stocks")
price = models.DecimalField(decimal_places=2, max_digits=10)
def __str__(self):
return self.symbol
class WalletStock(models.Model):
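    # Through-table linking a wallet to a stock position; the constraint in
    # Meta guarantees at most one row per (wallet, stock) pair.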
wallet = models.ForeignKey(Wallet, on_delete=models.CASCADE)
stocks = models.ForeignKey(Stock, on_delete=models.CASCADE)
count = models.PositiveIntegerField(default=0)
class Meta:
constraints = [
models.UniqueConstraint(fields=["wallet", "stocks"], name="unique_wallet_stock")
]
class StockTransfer(models.Model):
wallet = models.ForeignKey(Wallet, on_delete=models.CASCADE, related_name="stock_transfers")
stock = models.ForeignKey(Stock, on_delete=models.CASCADE, related_name="+")
amount = models.IntegerField()
class StockHistory(models.Model):
stocks = models.ForeignKey(Stock, on_delete=models.CASCADE, related_name="history")
timestamp = models.DateTimeField(auto_now_add=True)
price = models.DecimalField(decimal_places=2, max_digits=10)
class Meta:
ordering = ["-timestamp"]
| 34.025641 | 96 | 0.740769 | [
"MIT"
] | norbertcyran/currency-exchanger | backend/currency_exchanger/stocks/models.py | 1,327 | Python |
from datetime import date, datetime
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Field
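# Error response schemas shared across the API: one for malformed client
# requests, one for lookups that matched no record.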
class BadRequestResponse(BaseModel):
"""The client called the endpoint incorrectly."""
detail: str = Field(
...,
description="A human-readable summary of the client error.",
example="No identifiers provided, at least one is needed.",
)
class NotFoundResponse(BaseModel):
"""No existing record was found for the indentifier."""
detail: str = Field(
...,
description="A human-readable summary of the issue.",
example="Unknown contact_id",
)
| 25.84 | 68 | 0.668731 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Takuya-Miyazaki/ctms-api | ctms/schemas/web.py | 646 | Python |
import re
from epcpy.epc_schemes.base_scheme import EPCScheme
from epcpy.utils.common import ConvertException
from epcpy.utils.regex import BIC_URI
BIC_URI_REGEX = re.compile(BIC_URI)
class BIC(EPCScheme):
"""BIC EPC scheme implementation.
BIC pure identities are of the form:
urn:epc:id:bic:<BICcontainerCode>
Example:
urn:epc:id:bic:CSQU3054383
This class can be created using EPC pure identities via its constructor
"""
def __init__(self, epc_uri) -> None:
super().__init__()
if not BIC_URI_REGEX.fullmatch(epc_uri):
raise ConvertException(message=f"Invalid BIC URI {epc_uri}")
self.epc_uri = epc_uri
container_code = epc_uri.split(":")[4]
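        # An 11-character BIC container code: 3-letter owner code, 1-letter
        # equipment category identifier, 6-digit serial number, 1 check digit.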
self._container_code = container_code
self._owner_code = container_code[0:3]
self._equipment_category_identifier = container_code[3]
self._serial = container_code[4:10]
self._check_digit = container_code[10]
| 26.675676 | 75 | 0.694022 | [
"MIT"
] | nedap/retail-epcpy | epcpy/epc_schemes/bic.py | 987 | Python |
"""
Time series analysis functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import inspect
import functools
import glob
import numpy as np
import os
import weakref
from functools import wraps
from yt.extern.six import add_metaclass, string_types
from yt.convenience import load
from yt.config import ytcfg
from yt.data_objects.data_containers import data_object_registry
from yt.data_objects.derived_quantities import \
derived_quantity_registry
from yt.data_objects.analyzer_objects import \
create_quantity_proxy, \
analysis_task_registry, \
AnalysisTask
from yt.data_objects.particle_trajectories import \
ParticleTrajectories
from yt.funcs import \
iterable, \
ensure_list, \
mylog
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.exceptions import \
YTException, \
YTOutputNotIdentified
from yt.utilities.parallel_tools.parallel_analysis_interface \
import parallel_objects, parallel_root_only, communication_system
from yt.utilities.parameter_file_storage import \
simulation_time_series_registry
class AnalysisTaskProxy(object):
def __init__(self, time_series):
self.time_series = time_series
def __getitem__(self, key):
task_cls = analysis_task_registry[key]
@wraps(task_cls.__init__)
def func(*args, **kwargs):
task = task_cls(*args, **kwargs)
return self.time_series.eval(task)
return func
def keys(self):
return analysis_task_registry.keys()
def __contains__(self, key):
return key in analysis_task_registry
def get_ds_prop(propname):
def _eval(params, ds):
return getattr(ds, propname)
cls = type(propname, (AnalysisTask,),
dict(eval = _eval, _params = tuple()))
return cls
def get_filenames_from_glob_pattern(filenames):
file_list = glob.glob(filenames)
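    # If the pattern matches nothing directly, fall back to looking under the
    # configured yt test data directory before giving up.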
if len(file_list) == 0:
data_dir = ytcfg.get("yt", "test_data_dir")
pattern = os.path.join(data_dir, filenames)
td_filenames = glob.glob(pattern)
if len(td_filenames) > 0:
file_list = td_filenames
else:
raise YTOutputNotIdentified(filenames, {})
return sorted(file_list)
attrs = ("refine_by", "dimensionality", "current_time",
"domain_dimensions", "domain_left_edge",
"domain_right_edge", "unique_identifier",
"current_redshift", "cosmological_simulation",
"omega_matter", "omega_lambda", "omega_radiation",
"hubble_constant")
class TimeSeriesParametersContainer(object):
def __init__(self, data_object):
self.data_object = data_object
def __getattr__(self, attr):
if attr in attrs:
return self.data_object.eval(get_ds_prop(attr)())
raise AttributeError(attr)
class DatasetSeries(object):
r"""The DatasetSeries object is a container of multiple datasets,
allowing easy iteration and computation on them.
DatasetSeries objects are designed to provide easy ways to access,
analyze, parallelize and visualize multiple datasets sequentially. This is
primarily expressed through iteration, but can also be constructed via
analysis tasks (see :ref:`time-series-analysis`).
Parameters
----------
filenames : list or pattern
This can either be a list of filenames (such as ["DD0001/DD0001",
"DD0002/DD0002"]) or a pattern to match, such as
"DD*/DD*.index"). If it's the former, they will be loaded in
order. The latter will be identified with the glob module and then
sorted.
parallel : True, False or int
This parameter governs the behavior when .piter() is called on the
resultant DatasetSeries object. If this is set to False, the time
series will not iterate in parallel when .piter() is called. If
this is set to either True, one processor will be allocated for
each iteration of the loop. If this is set to an integer, the loop
        will be parallelized over this many workgroups. If the integer
value is less than the total number of available processors,
more than one processor will be allocated to a given loop iteration,
causing the functionality within the loop to be run in parallel.
setup_function : callable, accepts a ds
This function will be called whenever a dataset is loaded.
mixed_dataset_types : True or False, default False
Set to True if the DatasetSeries will load different dataset types, set
to False if loading dataset of a single type as this will result in a
considerable speed up from not having to figure out the dataset type.
Examples
--------
>>> ts = DatasetSeries(
"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0")
>>> for ds in ts:
... SlicePlot(ds, "x", "Density").save()
...
>>> def print_time(ds):
... print ds.current_time
...
>>> ts = DatasetSeries(
... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
... setup_function = print_time)
...
>>> for ds in ts:
... SlicePlot(ds, "x", "Density").save()
"""
def __new__(cls, outputs, *args, **kwargs):
if isinstance(outputs, string_types):
outputs = get_filenames_from_glob_pattern(outputs)
ret = super(DatasetSeries, cls).__new__(cls)
try:
ret._pre_outputs = outputs[:]
except TypeError:
raise YTOutputNotIdentified(outputs, {})
return ret
def __init__(self, outputs, parallel = True, setup_function = None,
mixed_dataset_types = False, **kwargs):
# This is needed to properly set _pre_outputs for Simulation subclasses.
self._mixed_dataset_types = mixed_dataset_types
if iterable(outputs) and not isinstance(outputs, string_types):
self._pre_outputs = outputs[:]
self.tasks = AnalysisTaskProxy(self)
self.params = TimeSeriesParametersContainer(self)
if setup_function is None:
setup_function = lambda a: None
self._setup_function = setup_function
for type_name in data_object_registry:
setattr(self, type_name, functools.partial(
DatasetSeriesObject, self, type_name))
self.parallel = parallel
self.kwargs = kwargs
def __iter__(self):
# We can make this fancier, but this works
for o in self._pre_outputs:
if isinstance(o, string_types):
ds = self._load(o, **self.kwargs)
self._setup_function(ds)
yield ds
else:
yield o
def __getitem__(self, key):
if isinstance(key, slice):
if isinstance(key.start, float):
return self.get_range(key.start, key.stop)
# This will return a sliced up object!
return DatasetSeries(self._pre_outputs[key], self.parallel)
o = self._pre_outputs[key]
if isinstance(o, string_types):
o = self._load(o, **self.kwargs)
self._setup_function(o)
return o
def __len__(self):
return len(self._pre_outputs)
@property
def outputs(self):
return self._pre_outputs
def piter(self, storage = None, dynamic = False):
r"""Iterate over time series components in parallel.
This allows you to iterate over a time series while dispatching
individual components of that time series to different processors or
processor groups. If the parallelism strategy was set to be
multi-processor (by "parallel = N" where N is an integer when the
DatasetSeries was created) this will issue each dataset to an
N-processor group. For instance, this would allow you to start a 1024
processor job, loading up 100 datasets in a time series and creating 8
processor groups of 128 processors each, each of which would be
assigned a different dataset. This could be accomplished as shown in
the examples below. The *storage* option is as seen in
:func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`
which is a mechanism for storing results of analysis on an individual
dataset and then combining the results at the end, so that the entire
set of processors have access to those results.
Note that supplying a *store* changes the iteration mechanism; see
below.
Parameters
----------
storage : dict
This is a dictionary, which will be filled with results during the
course of the iteration. The keys will be the dataset
indices and the values will be whatever is assigned to the *result*
attribute on the storage during iteration.
dynamic : boolean
This governs whether or not dynamic load balancing will be
enabled. This requires one dedicated processor; if this
is enabled with a set of 128 processors available, only
127 will be available to iterate over objects as one will
be load balancing the rest.
Examples
--------
Here is an example of iteration when the results do not need to be
stored. One processor will be assigned to each dataset.
>>> ts = DatasetSeries("DD*/DD*.index")
>>> for ds in ts.piter():
... SlicePlot(ds, "x", "Density").save()
...
This demonstrates how one might store results:
>>> def print_time(ds):
... print ds.current_time
...
>>> ts = DatasetSeries("DD*/DD*.index",
... setup_function = print_time )
...
>>> my_storage = {}
>>> for sto, ds in ts.piter(storage=my_storage):
... v, c = ds.find_max("density")
... sto.result = (v, c)
...
>>> for i, (v, c) in sorted(my_storage.items()):
... print "% 4i %0.3e" % (i, v)
...
This shows how to dispatch 4 processors to each dataset:
>>> ts = DatasetSeries("DD*/DD*.index",
... parallel = 4)
>>> for ds in ts.piter():
... ProjectionPlot(ds, "x", "Density").save()
...
"""
if self.parallel is False:
njobs = 1
elif dynamic is False:
if self.parallel is True:
njobs = -1
else:
njobs = self.parallel
else:
my_communicator = communication_system.communicators[-1]
nsize = my_communicator.size
if nsize == 1:
self.parallel = False
dynamic = False
njobs = 1
else:
njobs = nsize - 1
for output in parallel_objects(self._pre_outputs, njobs=njobs,
storage=storage, dynamic=dynamic):
if storage is not None:
sto, output = output
if isinstance(output, string_types):
ds = self._load(output, **self.kwargs)
self._setup_function(ds)
else:
ds = output
if storage is not None:
next_ret = (sto, ds)
else:
next_ret = ds
yield next_ret
def eval(self, tasks, obj=None):
tasks = ensure_list(tasks)
return_values = {}
for store, ds in self.piter(return_values):
store.result = []
for task in tasks:
try:
style = inspect.getargspec(task.eval)[0][1]
if style == 'ds':
arg = ds
elif style == 'data_object':
if obj is None:
obj = DatasetSeriesObject(self, "all_data")
arg = obj.get(ds)
rv = task.eval(arg)
# We catch and store YT-originating exceptions
# This fixes the standard problem of having a sphere that's too
# small.
except YTException:
pass
store.result.append(rv)
return [v for k, v in sorted(return_values.items())]
@classmethod
def from_filenames(cls, filenames, parallel = True, setup_function = None,
**kwargs):
r"""Create a time series from either a filename pattern or a list of
filenames.
This method provides an easy way to create a
:class:`~yt.data_objects.time_series.DatasetSeries`, given a set of
filenames or a pattern that matches them. Additionally, it can set the
parallelism strategy.
Parameters
----------
filenames : list or pattern
This can either be a list of filenames (such as ["DD0001/DD0001",
"DD0002/DD0002"]) or a pattern to match, such as
"DD*/DD*.index"). If it's the former, they will be loaded in
order. The latter will be identified with the glob module and then
sorted.
parallel : True, False or int
This parameter governs the behavior when .piter() is called on the
resultant DatasetSeries object. If this is set to False, the time
series will not iterate in parallel when .piter() is called. If
this is set to either True or an integer, it will be iterated with
1 or that integer number of processors assigned to each parameter
file provided to the loop.
setup_function : callable, accepts a ds
This function will be called whenever a dataset is loaded.
Examples
--------
>>> def print_time(ds):
... print ds.current_time
...
>>> ts = DatasetSeries.from_filenames(
... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
... setup_function = print_time)
...
>>> for ds in ts:
... SlicePlot(ds, "x", "Density").save()
"""
if isinstance(filenames, string_types):
filenames = get_filenames_from_glob_pattern(filenames)
# This will crash with a less informative error if filenames is not
# iterable, but the plural keyword should give users a clue...
for fn in filenames:
if not isinstance(fn, string_types):
raise YTOutputNotIdentified("DataSeries accepts a list of "
"strings, but "
"received {0}".format(fn))
obj = cls(filenames[:], parallel = parallel,
setup_function = setup_function, **kwargs)
return obj
@classmethod
def from_output_log(cls, output_log,
line_prefix = "DATASET WRITTEN",
parallel = True):
filenames = []
for line in open(output_log):
if not line.startswith(line_prefix): continue
cut_line = line[len(line_prefix):].strip()
fn = cut_line.split()[0]
filenames.append(fn)
obj = cls(filenames, parallel = parallel)
return obj
_dataset_cls = None
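    # Cache the dataset class after the first successful load so later
    # outputs skip type detection, unless mixed dataset types are expected.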
def _load(self, output_fn, **kwargs):
if self._dataset_cls is not None:
return self._dataset_cls(output_fn, **kwargs)
elif self._mixed_dataset_types:
return load(output_fn, **kwargs)
ds = load(output_fn, **kwargs)
self._dataset_cls = ds.__class__
return ds
def particle_trajectories(self, indices, fields=None, suppress_logging=False, ptype=None):
r"""Create a collection of particle trajectories in time over a series of
datasets.
Parameters
----------
indices : array_like
An integer array of particle indices whose trajectories we
want to track. If they are not sorted they will be sorted.
fields : list of strings, optional
A set of fields that is retrieved when the trajectory
collection is instantiated. Default: None (will default
to the fields 'particle_position_x', 'particle_position_y',
'particle_position_z')
suppress_logging : boolean
Suppress yt's logging when iterating over the simulation time
series. Default: False
ptype : str, optional
Only use this particle type. Default: None, which uses all particle type.
Examples
--------
>>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
>>> my_fns.sort()
>>> fields = ["particle_position_x", "particle_position_y",
>>> "particle_position_z", "particle_velocity_x",
>>> "particle_velocity_y", "particle_velocity_z"]
>>> ds = load(my_fns[0])
>>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
>>> indices = init_sphere["particle_index"].astype("int")
>>> ts = DatasetSeries(my_fns)
>>> trajs = ts.particle_trajectories(indices, fields=fields)
>>> for t in trajs :
>>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
Note
----
        This function will fail if there are duplicate particle ids or if some of the particles
disappear.
"""
return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging,
ptype=ptype)
class TimeSeriesQuantitiesContainer(object):
def __init__(self, data_object, quantities):
self.data_object = data_object
self.quantities = quantities
def __getitem__(self, key):
if key not in self.quantities: raise KeyError(key)
q = self.quantities[key]
def run_quantity_wrapper(quantity, quantity_name):
@wraps(derived_quantity_registry[quantity_name][1])
def run_quantity(*args, **kwargs):
to_run = quantity(*args, **kwargs)
return self.data_object.eval(to_run)
return run_quantity
return run_quantity_wrapper(q, key)
class DatasetSeriesObject(object):
def __init__(self, time_series, data_object_name, *args, **kwargs):
self.time_series = weakref.proxy(time_series)
self.data_object_name = data_object_name
self._args = args
self._kwargs = kwargs
qs = dict([(qn, create_quantity_proxy(qv)) for qn, qv in derived_quantity_registry.items()])
self.quantities = TimeSeriesQuantitiesContainer(self, qs)
def eval(self, tasks):
return self.time_series.eval(tasks, self)
def get(self, ds):
# We get the type name, which corresponds to an attribute of the
# index
cls = getattr(ds, self.data_object_name)
return cls(*self._args, **self._kwargs)
class RegisteredSimulationTimeSeries(type):
def __init__(cls, name, b, d):
type.__init__(cls, name, b, d)
code_name = name[:name.find('Simulation')]
if code_name:
simulation_time_series_registry[code_name] = cls
mylog.debug("Registering simulation: %s as %s", code_name, cls)
@add_metaclass(RegisteredSimulationTimeSeries)
class SimulationTimeSeries(DatasetSeries):
def __init__(self, parameter_filename, find_outputs=False):
"""
Base class for generating simulation time series types.
Principally consists of a *parameter_filename*.
"""
if not os.path.exists(parameter_filename):
raise IOError(parameter_filename)
self.parameter_filename = parameter_filename
self.basename = os.path.basename(parameter_filename)
self.directory = os.path.dirname(parameter_filename)
self.parameters = {}
self.key_parameters = []
# Set some parameter defaults.
self._set_parameter_defaults()
# Read the simulation dataset.
self._parse_parameter_file()
# Set units
self._set_units()
# Figure out the starting and stopping times and redshift.
self._calculate_simulation_bounds()
# Get all possible datasets.
self._get_all_outputs(find_outputs=find_outputs)
self.print_key_parameters()
def _set_parameter_defaults(self):
pass
def _parse_parameter_file(self):
pass
def _set_units(self):
pass
def _calculate_simulation_bounds(self):
pass
    def _get_all_outputs(self, **kwargs):
pass
def __repr__(self):
return self.parameter_filename
_arr = None
@property
def arr(self):
if self._arr is not None:
return self._arr
self._arr = functools.partial(YTArray, registry = self.unit_registry)
return self._arr
_quan = None
@property
def quan(self):
if self._quan is not None:
return self._quan
self._quan = functools.partial(YTQuantity,
registry = self.unit_registry)
return self._quan
@parallel_root_only
def print_key_parameters(self):
"""
Print out some key parameters for the simulation.
"""
if self.simulation_type == "grid":
for a in ["domain_dimensions", "domain_left_edge",
"domain_right_edge"]:
self._print_attr(a)
for a in ["initial_time", "final_time",
"cosmological_simulation"]:
self._print_attr(a)
if getattr(self, "cosmological_simulation", False):
for a in ["box_size", "omega_matter", "omega_lambda",
"omega_radiation", "hubble_constant",
"initial_redshift", "final_redshift"]:
self._print_attr(a)
for a in self.key_parameters:
self._print_attr(a)
mylog.info("Total datasets: %d." % len(self.all_outputs))
def _print_attr(self, a):
"""
Print the attribute or warn about it missing.
"""
if not hasattr(self, a):
mylog.error("Missing %s in dataset definition!", a)
return
v = getattr(self, a)
mylog.info("Parameters: %-25s = %s", a, v)
def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
r"""
Get datasets at or near to given values.
Parameters
----------
key: str
The key by which to retrieve outputs, usually 'time' or
'redshift'.
values: array_like
A list of values, given as floats.
tolerance : float
If not None, do not return a dataset unless the value is
within the tolerance value. If None, simply return the
nearest dataset.
Default: None.
outputs : list
The list of outputs from which to choose. If None,
self.all_outputs is used.
Default: None.
Examples
--------
>>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
"""
if not isinstance(values, YTArray):
if isinstance(values, tuple) and len(values) == 2:
values = self.arr(*values)
else:
values = self.arr(values)
values = values.in_base()
if outputs is None:
outputs = self.all_outputs
my_outputs = []
if not outputs:
return my_outputs
for value in values:
outputs.sort(key=lambda obj:np.abs(value - obj[key]))
if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
and outputs[0] not in my_outputs:
my_outputs.append(outputs[0])
else:
mylog.error("No dataset added for %s = %f.", key, value)
outputs.sort(key=lambda obj: obj['time'])
return my_outputs
| 37.576687 | 100 | 0.601102 | [
"BSD-3-Clause-Clear"
] | edilberto100/yt | yt/data_objects/time_series.py | 24,500 | Python |
from core.attack.attack import Attack
import random
import os
import re
import sys
import json
try:
from lxml import etree
except ImportError:
print("Failed to import ElementTree from any known place")
sys.exit(0)
try:
from bs4 import UnicodeDammit # BeautifulSoup 4
def decode_html(html_string):
converted = UnicodeDammit(html_string)
if not converted.unicode_markup:
raise UnicodeDecodeError(
"Failed to detect encoding, tried [%s]",
', '.join(converted.tried_encodings))
return converted.unicode_markup
except ImportError:
from BeautifulSoup import UnicodeDammit # BeautifulSoup 3
def decode_html(html_string):
converted = UnicodeDammit(html_string, isHTML=True)
if not converted.unicode:
raise UnicodeDecodeError(
"Failed to detect encoding, tried [%s]",
', '.join(converted.triedEncodings))
return converted.unicode
class mod_unfilter(Attack):
"""This class implements a unfilter vulnerabilities generator."""
name = "unfilter"
payloads = []
settings = {}
highest = 1
index = random.randint(0, highest)
CONFIG_FILE = "unfilterPayloads.txt"
require = []
PRIORITY = 5
def __init__(self, fp=None):
Attack.__init__(self, fp)
self.fd = open(os.path.join(self.CONFIG_DIR,
self.name, self.CONFIG_FILE), "r+")
self.payloads = json.load(self.fd)
def generateHandler(self, tree_node=None, o=None, elem=None):
if elem['type'] != "text":
o[int(elem['lineno']) - 1] = re.sub(r'(.*){0}(.*)'.format(elem['identifier']), lambda m: "{0}{1}{2}".format(m.group(1), self.payloads['payloads'][
self.payloads['revisable']][self.index]['vector'].format(elem['identifier'].replace(' ', '_')), m.group(2)), o[int(elem['lineno']) - 1], flags=re.IGNORECASE)
if elem['report'] is not None:
self.settings['key'].append(
elem['identifier'].replace(' ', '_'))
self.settings['value'].append('Boik')
else:
o[int(elem['lineno']) - 1] = re.sub(r'(.*){0}\s*([a-z!\.\?]+)(.*)'.format(elem['identifier']), lambda m: "{0}{1} {2}{3}".format(m.group(1), elem['identifier'], self.payloads[
'payloads'][self.payloads['revisable']][self.index]['vector'].format(elem['identifier'].replace(' ', '_')), m.group(3)), o[int(elem['lineno']) - 1], flags=re.IGNORECASE)
if elem['report'] is not None:
self.settings['key'].append(
elem['identifier'].replace(' ', '_'))
self.settings['value'].append('Boik')
def doJob(self, http_res, backend, dbms, parent=None):
"""This method do a Job."""
self.payloads[
'revisable'] = 'True' if self.doReturn is False else 'False'
self.settings = self.generate_payloads(http_res, parent=parent)
return self.settings
def study(self, etree_node, entries=None, lines=None, parent=None):
for identifier in self.payloads['identifiers']["{0}".format(parent if (parent is not None and parent in self.payloads['identifiers']) else "others")]:
tmp_id = identifier.split('->')
(report, identifier) = (tmp_id[0], tmp_id[1]) if len(
tmp_id) == 2 else (None, tmp_id[0])
found_node = etree_node.xpath("//*[@*[re:test(., '{0}', 'i')] or @*[re:test(name(.), '{0}', 'i')] or re:test(local-name(),'{0}', 'i') or text()[re:test(., '{0}', 'i')]]".format(
identifier), namespaces={'re': "http://exslt.org/regular-expressions"})
if found_node is not None and len(found_node) != 0:
for node in found_node:
if identifier in node.tag:
if self.verbose:
self.logY("Found in tag name {0}".format(node.tag))
d = {"type": "tag", "value": node.tag, "lineno": node.text.strip(
), "identifier": identifier, "report": report}
if d not in entries:
if self.verbose:
self.logY("\t{0}".format(d))
entries.append(d)
elif node.text is not None and identifier in node.text:
if self.verbose:
self.logY(
"Found in text, tag {0}".format(node.tag))
d = {"type": "text", "parent": node.tag, "lineno": node.getprevious().text.strip() if node.getprevious(
) is not None else node.getparent().getprevious().text.strip(), "identifier": identifier, "report": report}
if d not in entries:
if self.verbose:
self.logY("\t{0}".format(d))
entries.append(d)
for k, v in node.attrib.iteritems():
if identifier in v:
if self.verbose:
self.logY(
"Found in attribute value {0} of tag {1}".format(k, node.tag))
d = {"type": "attrval", "name": k, "tag": node.tag, "lineno": node.getprevious(
).text.strip(), "identifier": identifier, "report": report}
if d not in entries:
if self.verbose:
self.logY("\t{0}".format(d))
entries.append(d)
if identifier in k:
if self.verbose:
self.logY(
"Found in attribute name {0} of tag {1}".format(k, node.tag))
d = {"type": "attrname", "name": k, "tag": node.tag, "lineno": node.getprevious(
).text.strip(), "identifier": identifier, "report": report}
if d not in entries:
if self.verbose:
self.logY("\t{0}".format(d))
entries.append(d)
found_node = etree_node.xpath("//comment()[re:test(., '{0}', 'i')]".format(
identifier), namespaces={'re': "http://exslt.org/regular-expressions"})
if found_node is not None and len(found_node) != 0:
for node in found_node:
if self.verbose:
self.logY(
"Found in comment, content: \"{0}\"".format(node))
d = {"type": "comment", "lineno": (node.getparent().getprevious().text.strip()) if (node.getprevious(
) is None) else (node.getprevious().text.strip()), "identifier": identifier, "report": report}
if d not in entries:
if self.verbose:
self.logY("\t{0}".format(d))
entries.append(d)
# Generate payloads based on what situations we met.
def generate_payloads(self, html_code, parent=None):
e = []
o = []
l = []
for index, line in enumerate(html_code.splitlines(), 1):
o.append(line)
l.append("<!-- {0} -->{1}".format(index, line))
tree = etree.HTML(decode_html("\n".join(l))).getroottree()
self.study(tree, entries=e, lines=l, parent=parent)
self.settings = {"key": [], "value": [], "html": "",
"extra": {}, "warning": [], "error": []}
for elem in e:
# <a href="inject_point"></a>
if elem['type'] == "attrval":
found_node = etree.HTML(l[int(elem['lineno']) - 1]).xpath("//*[@*[re:test(., '{0}', 'i')]]".format(
elem['identifier']), namespaces={'re': "http://exslt.org/regular-expressions"})
if len(found_node) == 1:
self.generateHandler(tree_node=tree, o=o, elem=elem)
# <a inject_point="test">
elif elem['type'] == "attrname":
found_node = etree.HTML(l[int(elem['lineno']) - 1]).xpath("//*[@*[re:test(name(.), '{0}', 'i')]]".format(
elem['identifier']), namespaces={'re': "http://exslt.org/regular-expressions"})
if len(found_node) == 1:
self.generateHandler(tree_node=tree, o=o, elem=elem)
# <inject_point name="test" />
elif elem['type'] == "tag":
found_node = etree.HTML(l[int(elem['lineno']) - 1]).xpath("//*[re:test(local-name(), '{0}', 'i')]".format(
elem['identifier']), namespaces={'re': "http://exslt.org/regular-expressions"})
if len(found_node) == 1:
self.generateHandler(tree_node=tree, o=o, elem=elem)
# <span>inject_point</span>
elif elem['type'] == "text":
found_node = etree.HTML(
l[int(elem['lineno']) - 1]).xpath("//*[text()]")
if len(found_node) == 1:
self.generateHandler(tree_node=tree, o=o, elem=elem)
# <!-- inject_point -->
elif elem['type'] == "comment":
try:
found_node = etree.HTML(
l[int(elem['lineno']) - 1]).xpath("//*[comment()]")
except:
found_node = etree.HTML("{0}{1}{2}".format("<div>", l[int(elem['lineno']) - 1], "</div>")).xpath(
"//comment()[re:test(., '{0}', 'i')]".format(elem['identifier']), namespaces={'re': "http://exslt.org/regular-expressions"})
if len(found_node) == 1:
self.generateHandler(tree_node=tree, o=o, elem=elem)
self.settings['html'] = "\n".join(o)
if not self.settings['warning']:
self.settings.pop('warning', None)
if not self.settings['error']:
self.settings.pop('error', None)
return self.settings
| 51.17 | 189 | 0.496385 | [
"Apache-2.0"
] | qazbnm456/VWGen | core/attack/mod_unfilter.py | 10,234 | Python |
import os, os.path
import shutil
from play.utils import *
COMMANDS = ['idealize', 'idea']
HELP = {
'idealize': 'Create all IntelliJ Idea configuration files'
}
def execute(**kargs):
command = kargs.get("command")
app = kargs.get("app")
args = kargs.get("args")
play_env = kargs.get("env")
app.check()
modules = app.modules()
classpath = app.getClasspath()
application_name = app.readConf('application.name')
imlFile = os.path.join(app.path, application_name + '.iml')
shutil.copyfile(os.path.join(play_env["basedir"], 'resources/idea/imlTemplate.xml'), imlFile)
cpXML = ""
replaceAll(imlFile, r'%PLAYHOME%', play_env["basedir"].replace('\\', '/'))
if len(modules):
lXML = ""
cXML = ""
for module in modules:
lXML += ' <content url="file://%s">\n <sourceFolder url="file://%s" isTestSource="false" />\n </content>\n' % (module, os.path.join(module, 'app').replace('\\', '/'))
replaceAll(imlFile, r'%LINKS%', lXML)
else:
replaceAll(imlFile, r'%LINKS%', '')
print "~ OK, the application is ready for Intellij Idea"
print "~ Use File/New Module/Import Existing module"
print "~"
| 29.731707 | 189 | 0.607875 | [
"MIT"
] | ericlink/adms-server | playframework-dist/1.1-src/framework/pym/play/commands/intellij.py | 1,219 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 10:11:14 2018
@author: magalidrumare
@ copyright https://github.com/fchollet/deep-learning-with-python-notebooks
"""
# Using a pre-trained convnet: VGG16
# An effective approach to deep learning on small image datasets is to leverage a pre-trained network:
# a saved network trained on a large dataset for a classification task.
# -> ImageNet (1.4 million labeled images and 1000 different classes), VGG, ResNet, Inception, Xception, etc.
# Part 1 - Take the convolutional base of a previously trained network and run the data through it
# Part 2 - Train a new classifier on top of the output
# Why not reuse the classifier on top?
# -> The representation learned by the classifier is specific to the set of classes the model was trained on.
# -> The densely connected layers no longer contain any information about where the objects are located.
# Representations extracted by specific convolution layers depend on the depth of the layer in the model:
# layers that come earlier in the model extract generic feature maps (edges, color, textures),
# layers higher up extract abstract concepts (cat ear, dog eye).
# Part 1 - Take the convolutional base of a previously trained network
import keras
# Instantiate the VGG16 model
# include_top=False: do not include the densely connected classifier on top of the network.
from keras.applications import VGG16
conv_base=VGG16(weights='imagenet',
                include_top=False,
input_shape=(150,150,3))
conv_base.summary()
#-> the final feature map has shape (4,4,512)
#-> that the features on the top of which we stick a densely-connected classifier.
# Part 1 ...... and run the data through it
# Extract features from these images by calling the predict method of the conv_base model
# import the dataset
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
# to be modified: adjust this path to your local copy of the dataset
base_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20
# Create extract features function
def extract_features(directory, sample_count):
# 4, 4, 512 -> the final feature map of conv_base has shape (4,4,512)
features = np.zeros(shape=(sample_count, 4, 4, 512))
labels = np.zeros(shape=(sample_count))
# pre-processing of the images with datagen.flow_from_directory
generator = datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=batch_size,
class_mode='binary')
i = 0
for inputs_batch, labels_batch in generator:
# extract the features from the conv_base with conv_base.predict
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size : (i + 1) * batch_size] = features_batch
labels[i * batch_size : (i + 1) * batch_size] = labels_batch
i += 1
if i * batch_size >= sample_count:
# Note that since generators yield data indefinitely in a loop,
# we must `break` after every image has been seen once.
break
return features, labels
# Apply the extract_features function to the training, validation and test image datasets.
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
# shape of the extracted features (samples, 4, 4 , 512)
# -> must be flattened to (samples, 8192)
train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
# Part 2- Train a new classifier on top of the output
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(train_features, train_labels,
epochs=30,
batch_size=20,
validation_data=(validation_features, validation_labels))
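# Illustrative addition (not in the original notebook): the test features extracted
# above are never used, so a minimal evaluation sketch is shown here, assuming the
# same model and metrics compiled above.
test_loss, test_acc = model.evaluate(test_features, test_labels)
print('Test accuracy: {:.3f}'.format(test_acc))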
| 37.22314 | 110 | 0.720693 | [
"MIT"
] | MagaliDrumare/How-to-learn-Keras-Deep-Learning-with-Python-book- | 08_PreTrainedConvNet.py | 4,505 | Python |
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
"""
Using the plotter:
Call it from the command line, and supply it with logdirs to experiments.
Suppose you ran an experiment with name 'test', and you ran 'test' for 10
random seeds. The runner code stored it in the directory structure
data
L test_EnvName_DateTime
L 0
L log.txt
L params.json
L 1
L log.txt
L params.json
.
.
.
L 9
L log.txt
L params.json
To plot learning curves from the experiment, averaged over all random
seeds, call
python plot.py data/test_EnvName_DateTime --value AverageReturn
and voila. To see a different statistic, change what you put in for
the keyword --value. You can also enter /multiple/ values, and it will
make all of them in order.
Suppose you ran two experiments: 'test1' and 'test2'. In 'test2' you tried
a different set of hyperparameters from 'test1', and now you would like
to compare them -- see their learning curves side-by-side. Just call
python plot.py data/test1 data/test2
and it will plot them both! They will be given titles in the legend according
to their exp_name parameters. If you want to use custom legend titles, use
the --legend flag and then provide a title for each logdir.
"""
def plot_data(data, value="AverageReturn"):
if isinstance(data, list):
data = pd.concat(data, ignore_index=True)
sns.set(style="darkgrid", font_scale=1.5)
# print(data)
sns.tsplot(data=data, time="Iteration", value=value, unit="Unit", condition="Condition")
plt.legend(loc='best').draggable()
plt.show()
def get_datasets(fpath, condition=None):
unit = 0
datasets = []
for root, dir, files in os.walk(fpath):
if 'log.txt' in files:
param_path = open(os.path.join(root, 'params.json'))
params = json.load(param_path)
exp_name = params['exp_name']
log_path = os.path.join(root, 'log.txt')
experiment_data = pd.read_table(log_path)
experiment_data.insert(
len(experiment_data.columns),
'Unit',
unit
)
experiment_data.insert(
len(experiment_data.columns),
'Condition',
condition or exp_name
)
datasets.append(experiment_data)
unit += 1
return datasets
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('logdir', nargs='*')
parser.add_argument('--legend', nargs='*')
parser.add_argument('--value', default='AverageReturn', nargs='*')
args = parser.parse_args()
use_legend = False
if args.legend is not None:
assert len(args.legend) == len(args.logdir), \
"Must give a legend title for each set of experiments."
use_legend = True
data = []
if use_legend:
for logdir, legend_title in zip(args.logdir, args.legend):
data += get_datasets(logdir, legend_title)
else:
for logdir in args.logdir:
data += get_datasets(logdir)
if isinstance(args.value, list):
values = args.value
else:
values = [args.value]
for value in values:
plot_data(data, value=value)
if __name__ == "__main__":
main()
| 28.616667 | 92 | 0.629004 | [
"MIT"
] | tiagokv/drlberkeley | hw2/plot.py | 3,434 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-02 22:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('database', '0037_auto_20170901_0917'),
]
operations = [
migrations.RenameModel(
old_name='Company',
new_name='CompanySettings',
),
]
| 20.3 | 48 | 0.625616 | [
"MIT"
] | ACLARKNET/aclarknet-database | aclarknet/database/migrations/0038_auto_20170902_1829.py | 406 | Python |
print("Hello nader from python")
num = 101
if (num % 2) == 0:
print("{0} is Even number".format(num))
else:
print("{0} is Odd number".format(num))
| 17.444444 | 43 | 0.605096 | [
"MIT"
] | mickknutson/SITE_BOOTCAMP_QA | StudentWork/Nader/python/main.py | 157 | Python |
# Distributed under the MIT License.
# See LICENSE.txt for details.
from spectre.Visualization.GenerateXdmf import generate_xdmf
import spectre.Informer as spectre_informer
import unittest
import os
# For Py2 compatibility
try:
unittest.TestCase.assertRaisesRegex
except AttributeError:
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
class TestGenerateXdmf(unittest.TestCase):
def test_generate_xdmf(self):
data_file_prefix = os.path.join(spectre_informer.unit_test_path(),
'Visualization/Python', 'VolTestData')
output_filename = 'Test_GenerateXdmf_output'
if os.path.isfile(output_filename + '.xmf'):
os.remove(output_filename + '.xmf')
generate_xdmf(file_prefix=data_file_prefix,
output=output_filename,
subfile_name="element_data",
start_time=0.,
stop_time=1.,
stride=1,
coordinates='InertialCoordinates')
# The script is quite opaque right now, so we only test that we can run
# it and it produces output without raising an error. To test more
# details, we should refactor the script into smaller units.
self.assertTrue(os.path.isfile(output_filename + '.xmf'))
os.remove(output_filename + '.xmf')
def test_subfile_not_found(self):
data_file_prefix = os.path.join(spectre_informer.unit_test_path(),
'Visualization/Python', 'VolTestData')
output_filename = 'Test_GenerateXdmf_subfile_not_found'
if os.path.isfile(output_filename + '.xmf'):
os.remove(output_filename + '.xmf')
with self.assertRaisesRegex(ValueError, 'Could not open subfile'):
generate_xdmf(file_prefix=data_file_prefix,
output=output_filename,
subfile_name="unknown_subfile",
start_time=0.,
stop_time=1.,
stride=1,
coordinates='InertialCoordinates')
if __name__ == '__main__':
unittest.main(verbosity=2)
| 38.672414 | 79 | 0.617477 | [
"MIT"
] | Ambrou/spectre | tests/Unit/Visualization/Python/Test_GenerateXdmf.py | 2,243 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Pint integration helpers
# (C) 2016 VRT Systems
#
from pint import UnitRegistry
HAYSTACK_CONVERSION = [
(u'_', ' '),
(u'°','deg'),
(u'per ', '/ '),
(u'per_h','per_hour'),
(u'Δ','delta_'),
(u'meters','meter'),
(u'liters','liter'),
(u'gallons','gallon'),
(u'millimeters','millimeter'),
(u'centimeters','centimeter'),
(u'H₂O', 'H2O'),
(u'Volt', 'volt'),
(u'grams', 'gram'),
(u'tons refrigeration', 'refrigeration_ton'),
(u'%', 'percent'),
(u'degree kelvin','degK'),
(u'degree celsius','degC'),
(u'degree farenheit','degF'),
(u'pound force', 'pound_force'),
(u'metric ton', 'metric_ton'),
(u'fluid ounce', 'fluid_ounce'),
(u'imperial gallon','imperial_gallon'),
(u'galUK','UK_gallon'),
(u'kgdegK','(kg degK)'),
(u'tonrefh','refrigeration_ton * hour'),
(u'tonref','refrigeration_ton'),
(u'Nm ', 'newton meter'),
(u'Ns', 'newton second'),
(u'Js', 'joule second'),
(u'short ton', 'short_ton'),
(u'degrees angular', 'deg'),
(u'degrees phase', 'deg'),
(u'degPh', 'deg'),
(u'yr','year '),
(u'atmosphere', 'atm'),
(u'mo','month '),
(u'wk','week '),
(u'parts / unit','ppu'),
(u'parts / million','ppm'),
(u'parts / billion','ppb'),
(u'kcfm','kilocfm'),
(u'kilohm','kiloohm'),
(u'megohm','megaohm'),
(u'volt ampere reactive', 'VAR'),
(u'kilovolt ampere reactive', 'kVAR'),
(u'megavolt ampere reactive', 'MVAR'),
(u'VAh', 'volt * ampere * hour'),
(u'kVAh', 'kilovolt * ampere * hour'),
(u'MVAh', 'megavolt * ampere * hour'),
(u'VARh', 'VAR * hour'),
(u'kVARh', 'kVAR * hour'),
(u'MVARh', 'MVAR * hour'),
(u'hph', 'horsepower * hour'),
(u'energy efficiency ratio', 'EER'),
(u'coefficient of performance', 'COP'),
(u'data center infrastructure efficiency', 'DCIE'),
(u'power usage effectiveness', 'PUE'),
(u'formazin nephelometric unit', 'fnu'),
(u'nephelometric turbidity units', 'ntu'),
(u'dBµV', 'dB microvolt'),
(u'dBmV', 'dB millivolt'),
(u'db','dB'),
(u'Am', 'A * m'),
(u'percent relative humidity', 'percentRH'),
(u'pf', 'PF'),
(u'power factor', 'PF'),
(u'gH2O','g H2O'),
(u'irradiance',''),
(u'irr',''),
(u'dry air', 'dry'),
(u'dry', 'dry_air'),
(u'kgAir','kg dry_air'),
(u'percent obscuration', 'percentobsc'),
(u'natural gas', ''),
(u'Ωm', 'ohm meter'),
(u'hecto cubic foot', 'hecto_cubic_foot'),
(u'julian month','month'),
(u'tenths second', 'tenths_second'),
(u'hundredths second', 'hundredths_second'),
(u'australian dollar','australian_dollar'),
(u'british pound','british_pound'),
(u'canadian dollar','canadian_dollar'),
(u'chinese yuan','chinese_yuan'),
(u'emerati dirham','emerati_dirham'),
(u'indian rupee','indian_rupee'),
(u'japanese yen','japanese_yen'),
(u'russian ruble','russian_ruble'),
(u'south korean won','south_korean_won'),
(u'swedish krona','swedish_krona'),
(u'swiss franc','swiss_franc'),
(u'taiwan dollar','taiwan_dollar'),
(u'us dollar','us_dollar'),
(u'new israeli shekel','new_israeli_shekel'),
(u'delta_K', 'delta_degC'),
(u'delta degK', 'delta_degC'),
(u'delta degC', 'delta_degC'),
(u'delta degF', 'delta_degF'),
(u'$', 'USD'),
(u'£', 'GBP'),
(u'元', 'CNY'),
(u'€', 'EUR'),
(u'₹', 'INR'),
(u'¥', 'JPY'),
(u'₩', 'KRW'),
(u'of','')
]
PINT_CONVERSION = [
(u'foot ** 3', 'cubic_foot'),
(u'/','per'),
(u'hectofoot ** 3','hecto_cubic_foot'),
(u'meter ** 3','cubic_meter'),
(u'Volt_per','volts_per'),
(u'°ree','degree')
]
def to_haystack(unit):
"""
Some parsing tweaks to fit pint units / handling of edge cases.
"""
global HAYSTACK_CONVERSION
global PINT_CONVERSION
if unit == u'per_minute' or \
unit == u'/min' or \
unit == u'per_second' or \
unit == u'/s' or \
unit == u'per_hour' or \
unit == u'/h' or \
unit == None:
return u''
# Those units are not units... they are impossible to fit anywhere in Pint
for pint_value, haystack_value in PINT_CONVERSION:
unit = unit.replace(pint_value, haystack_value)
for haystack_value, pint_value in HAYSTACK_CONVERSION:
if pint_value == u'':
continue
unit = unit.replace(pint_value, haystack_value)
return unit
def to_pint(unit):
"""
Some parsing tweaks to fit pint units / handling of edge cases.
"""
global HAYSTACK_CONVERSION
if unit == u'per_minute' or \
unit == u'/min' or \
unit == u'per_second' or \
unit == u'/s' or \
unit == u'per_hour' or \
unit == u'/h' or \
unit == None:
return ''
# Those units are not units... they are impossible to fit anywhere in Pint
for haystack_value, pint_value in HAYSTACK_CONVERSION:
unit = unit.replace(haystack_value, pint_value)
return unit
def define_haystack_units():
"""
Missing units found in project-haystack
Added to the registry
"""
ureg = UnitRegistry(on_redefinition='ignore')
ureg.define(u'% = [] = percent')
ureg.define(u'pixel = [] = px = dot = picture_element = pel')
ureg.define(u'decibel = [] = dB')
ureg.define(u'ppu = [] = parts_per_unit')
ureg.define(u'ppm = [] = parts_per_million')
ureg.define(u'ppb = [] = parts_per_billion')
ureg.define(u'%RH = [] = percent_relative_humidity = percentRH')
ureg.define(u'cubic_feet = ft ** 3 = cu_ft')
ureg.define(u'cfm = cu_ft * minute = liter_per_second / 0.4719475')
ureg.define(u'cfh = cu_ft * hour')
ureg.define(u'cfs = cu_ft * second')
ureg.define(u'VAR = volt * ampere')
ureg.define(u'kVAR = 1000 * volt * ampere')
ureg.define(u'MVAR = 1000000 * volt * ampere')
ureg.define(u'inH2O = in_H2O')
ureg.define(u'dry_air = []')
ureg.define(u'gas = []')
ureg.define(u'energy_efficiency_ratio = [] = EER')
ureg.define(u'coefficient_of_performance = [] = COP')
ureg.define(u'data_center_infrastructure_efficiency = [] = DCIE')
ureg.define(u'power_usage_effectiveness = [] = PUE')
ureg.define(u'formazin_nephelometric_unit = [] = fnu')
ureg.define(u'nephelometric_turbidity_units = [] = ntu')
ureg.define(u'power_factor = [] = PF')
ureg.define(u'degree_day_celsius = [] = degdaysC')
ureg.define(u'degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF')
ureg.define(u'footcandle = lumen / sq_ft = ftcd')
ureg.define(u'Nm = newton * meter')
ureg.define(u'%obsc = [] = percent_obscuration = percentobsc')
ureg.define(u'cycle = []')
ureg.define(u'cph = cycle / hour')
ureg.define(u'cpm = cycle / minute')
ureg.define(u'cps = cycle / second')
ureg.define(u'hecto_cubic_foot = 100 * cubic_foot')
ureg.define(u'tenths_second = second / 10')
ureg.define(u'hundredths_second = second / 100')
#ureg.define('irradiance = W / sq_meter = irr')
# In the definition of project haystack, there's a redundancy as irr = W/m^2
# no need to use : watts_per_square_meter_irradiance
# CURRENCY
# I know...we won'T be able to convert right now !
ureg.define(u'australian_dollar = [] = AUD')
ureg.define(u'british_pound = [] = GBP = £')
ureg.define(u'canadian_dollar = [] = CAD')
ureg.define(u'chinese_yuan = [] = CNY = 元')
ureg.define(u'emerati_dirham = [] = AED')
ureg.define(u'euro = [] = EUR = €')
ureg.define(u'indian_rupee = [] = INR = ₹')
ureg.define(u'japanese_yen = [] = JPY = ¥')
ureg.define(u'russian_ruble = [] = RUB = руб')
ureg.define(u'south_korean_won = [] = KRW = ₩')
ureg.define(u'swedish_krona = [] = SEK = kr')
ureg.define(u'swiss_franc = [] = CHF = Fr')
ureg.define(u'taiwan_dollar = [] = TWD')
ureg.define(u'us_dollar = [] = USD = $')
ureg.define(u'new_israeli_shekel = [] = NIS')
return ureg
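# Minimal usage sketch (illustrative addition, not part of the original module).
# It assumes only the helpers defined above and shows a haystack -> pint -> haystack
# round trip for a simple unit string.
if __name__ == '__main__':
    ureg = define_haystack_units()
    pint_unit = to_pint(u'°C')  # '°' is rewritten to 'deg', giving 'degC'
    print('{0} -> {1}'.format(pint_unit, to_haystack(pint_unit)))  # degC -> °C
    print(ureg.parse_units(pint_unit))  # Pint resolves the converted string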
| 42.012766 | 82 | 0.474526 | [
"BSD-2-Clause"
] | clarsen/hszinc | hszinc/pintutil.py | 9,903 | Python |
""" .. _Line-api:
**Line** --- Spectral line metadata.
------------------------------------
This module defines the Line class for LINE entries in BDPs.
"""
# system imports
import xml.etree.cElementTree as et
# ADMIT imports
import bdp_types as bt
from UtilBase import UtilBase
class Line(UtilBase):
""" Class for holding information on a specific spectral line.
Parameters
----------
keyval : dict
Dictionary of keyword:value pairs.
Attributes
----------
name : str
Name of the molecule/atom.
Default: "".
uid : str
Unique identifier for the transition.
Default: "".
formula : str
The chemical formula.
Default: "".
transition : str
The transition/quantum number information.
Default: "".
energies : 2 element list
List of the lower and upper state energies of the transition.
Default: [0.0, 0.0].
energyunits : str
Units of the upper/lower state energy.
Default: K.
linestrength : float
The line strength of the transition.
Default: 0.0.
lsunits : str
The units of the line strength.
Default: "Debye^2".
frequency : float
The frequency of the transition.
Default: 0.0.
funits : str
The units of the frequency.
Default: "GHz".
blend : int
        If this molecule is blended with others. A value of 0 means no blending;
            any other value gives the index of the blend.
Default: 0 (no blending).
"""
def __init__(self, **keyval):
self.name = ""
self.uid = ""
self.formula = ""
self.transition = ""
self.energies = [0.0, 0.0]
self.energyunits = "K"
self.linestrength = 0.0
self.lsunits = "Debye^2"
self.frequency = 0.0
self.funits = "GHz"
self.blend = 0
UtilBase.__init__(self, **keyval)
def setupperenergy(self, value):
""" Method to set the upper state energy.
Parameters
----------
value : float
The value to set the upper state energy to.
Returns
-------
None
"""
if isinstance(value, float) :
self.energies[1] = value
elif isinstance(value, int) :
self.energies[1] = float(value)
else :
raise Exception("Energy must be a number")
def setlowerenergy(self, value):
""" Method to set the lower state energy.
Parameters
----------
value : float
The value to set the lower state energy to.
Returns
-------
None
"""
if isinstance(value, float) :
self.energies[0] = value
elif isinstance(value, int) :
self.energies[0] = float(value)
else :
raise Exception("Energy must be a number")
def getlowerenergy(self) :
""" Method to get the lower state energy.
Parameters
----------
None
Returns
-------
Float of the lower state energy.
"""
return self.energies[0]
def getupperenergy(self):
""" Method to get the upper state energy.
Parameters
----------
None
Returns
-------
Float of the upper state energy.
"""
return self.energies[1]
def setkey(self, name="", value=""):
"""
set keys, two styles are possible:
1. name = {key:val} e.g. **setkey({"a":1})**
2. name = "key", value = val e.g. **setkey("a", 1)**
This method checks the type of the keyword value, as it must
remain the same. Also new keywords cannot be added.
Parameters
----------
name : dictionary or string
            Dictionary of keyword value pairs to set or a string with the name
of a single key
value : any
The value to change the keyword to
Returns
-------
None
"""
if isinstance(name, dict):
for k, v in name.iteritems():
if hasattr(self, k):
if type(v) == type(getattr(self, k)):
if k == "energies" and not isinstance(v, list) and len(v) != 2:
raise Exception("Energies must be a list in the format [lower, upper], use setupperenergy or setlowerenergy to set them individually.")
setattr(self, k, v)
else:
raise Exception("Cannot change data type for %s, expected %s but got %s" % (k, str(type(getattr(self, k))), str(type(v))))
else:
raise Exception("Invalid key given to Line class: %s" % (k))
elif not name == "":
if hasattr(self, name):
if type(value) == type(getattr(self, name)):
if name == "energies" and not isinstance(value, list) and len(value) != 2:
raise Exception("Energies must be a list in the format [lower, upper], use setupperenergy or setlowerenergy to set them individually.")
setattr(self, name, value)
else:
raise Exception("Cannot change data type for %s, expected %s but got %s" % (name, str(type(getattr(self, name))), str(type(value))))
else:
raise Exception("Invalid key given to Line class: %s" % (name))
else:
raise Exception("Invalid name parameter given, it must be a string or a dictionary of keys:values.")
def isequal(self, line):
""" Experimental method to compare 2 line classes
Parameters
----------
line : Line
The class to compare this one to.
Returns
-------
Boolean whether or not the two classes contain the same data.
"""
try:
for i in self.__dict__:
if cmp(getattr(self, i), getattr(line, i)) != 0:
return False
except:
return False
return True
| 29.813636 | 163 | 0.495197 | [
"MIT"
] | astroumd/admit | admit/util/Line.py | 6,559 | Python |
algorithm='hc'
env_class='UnityMLVector'
model_class='SingleLayerPerceptron'
environment = {
'name': 'compiled_unity_environments/Banana.app'
}
model = {
'state_size': 37,
'action_size': 4
}
agent = {
'action_size': 4,
'policy': 'stochastic'
}
train = {
'n_episodes': 2000,
'solve_score': 13.0,
'npop': 6
}
| 14.291667 | 52 | 0.629738 | [
"MIT"
] | danielnbarbosa/angela | cfg/unity/banana/banana_hc.py | 343 | Python |
from functools import partial
from typing import List, Optional, Sequence, cast
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from kartothek.core.typing import StoreFactory
from kartothek.io.dask.compression import pack_payload, unpack_payload_pandas
from kartothek.io_components.metapartition import MetaPartition
from kartothek.io_components.write import write_partition
from kartothek.serialization import DataFrameSerializer
_KTK_HASH_BUCKET = "__KTK_HASH_BUCKET"
def _hash_bucket(df: pd.DataFrame, subset: Optional[Sequence[str]], num_buckets: int):
"""
Categorize each row of `df` based on the data in the columns `subset`
into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`
"""
if not subset:
subset = df.columns
hash_arr = pd.util.hash_pandas_object(df[subset], index=False)
buckets = hash_arr % num_buckets
available_bit_widths = np.array([8, 16, 32, 64])
mask = available_bit_widths > np.log2(num_buckets)
bit_width = min(available_bit_widths[mask])
return df.assign(**{_KTK_HASH_BUCKET: buckets.astype(f"uint{bit_width}")})
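# Illustrative example (added; not part of kartothek): calling
#   _hash_bucket(pd.DataFrame({"a": [1, 2]}), subset=["a"], num_buckets=4)
# returns the same frame plus a uint8 "__KTK_HASH_BUCKET" column with a bucket id
# in [0, 4) per row; equal values of "a" always hash to the same bucket, which is
# what the shuffle below relies on.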
def shuffle_store_dask_partitions(
ddf: dd.DataFrame,
table: str,
secondary_indices: List[str],
metadata_version: int,
partition_on: List[str],
store_factory: StoreFactory,
df_serializer: Optional[DataFrameSerializer],
dataset_uuid: str,
num_buckets: int,
sort_partitions_by: List[str],
bucket_by: Sequence[str],
) -> da.Array:
"""
Perform a dataset update with dask reshuffling to control partitioning.
The shuffle operation will perform the following steps
1. Pack payload data
Payload data is serialized and compressed into a single byte value using
``distributed.protocol.serialize_bytes``, see also ``pack_payload``.
2. Apply bucketing
Hash the column subset ``bucket_by`` and distribute the hashes in
``num_buckets`` bins/buckets. Internally every bucket is identified by an
integer and we will create one physical file for every bucket ID. The
bucket ID is not exposed to the user and is dropped after the shuffle,
before the store. This is done since we do not want to guarantee at the
moment, that the hash function remains stable.
3. Perform shuffle (dask.DataFrame.groupby.apply)
The groupby key will be the combination of ``partition_on`` fields and the
hash bucket ID. This will create a physical file for every unique tuple
in ``partition_on + bucket_ID``. The function which is applied to the
dataframe will perform all necessary subtask for storage of the dataset
(partition_on, index calc, etc.).
4. Unpack data (within the apply-function)
After the shuffle, the first step is to unpack the payload data since
the follow up tasks will require the full dataframe.
5. Pre storage processing and parquet serialization
We apply important pre storage processing like sorting data, applying
final partitioning (at this time there should be only one group in the
payload data but using the ``MetaPartition.partition_on`` guarantees the
appropriate data structures kartothek expects are created.).
After the preprocessing is done, the data is serialized and stored as
parquet. The applied function will return an (empty) MetaPartition with
indices and metadata which will then be used to commit the dataset.
Returns
-------
A dask.Array holding relevant MetaPartition objects as values
"""
if ddf.npartitions == 0:
return ddf
group_cols = partition_on.copy()
if num_buckets is None:
raise ValueError("``num_buckets`` must not be None when shuffling data.")
meta = ddf._meta
meta[_KTK_HASH_BUCKET] = np.uint64(0)
ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
group_cols.append(_KTK_HASH_BUCKET)
unpacked_meta = ddf._meta
ddf = pack_payload(ddf, group_key=group_cols)
ddf_grouped = ddf.groupby(by=group_cols)
unpack = partial(
_unpack_store_partition,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
table=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
unpacked_meta=unpacked_meta,
)
return cast(
da.Array, # Output type depends on meta but mypy cannot infer this easily.
ddf_grouped.apply(unpack, meta=("MetaPartition", "object")),
)
def _unpack_store_partition(
df: pd.DataFrame,
secondary_indices: List[str],
sort_partitions_by: List[str],
table: str,
dataset_uuid: str,
partition_on: List[str],
store_factory: StoreFactory,
df_serializer: DataFrameSerializer,
metadata_version: int,
unpacked_meta: pd.DataFrame,
) -> MetaPartition:
"""Unpack payload data and store partition"""
df = unpack_payload_pandas(df, unpacked_meta)
if _KTK_HASH_BUCKET in df:
df = df.drop(_KTK_HASH_BUCKET, axis=1)
return write_partition(
partition_df=df,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
dataset_table_name=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
)
| 35.301887 | 86 | 0.716195 | [
"MIT"
] | MartinHaffner/kartothek | kartothek/io/dask/_shuffle.py | 5,613 | Python |
from operator import ge
from typing import List, Optional, Tuple # Dict,
from fastapi import FastAPI, HTTPException, Depends, Query, status
from fastapi.templating import Jinja2Templates
from pathlib import Path
from fastapi import Request # , Response
# from fastapi.responses import JSONResponse
# from pymongo.common import validate_server_api_or_none
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
import datetime as dt
from app.core.config import get_logger
from todoer_api.model import (
Task,
TodoerInfo,
TaskCreate,
TaskUpdate,
TaskPartialUpdate,
ObjectId,
)
from todoer_api.data_layer import (
TaskDatabase,
DataLayerException,
database_factory,
)
from todoer_api import __version__, __service_name__
logger = get_logger("todoer")
BASE_PATH = Path(__file__).resolve().parent
TEMPLATES = Jinja2Templates(directory=str(BASE_PATH / "templates"))
# ------------------------------------------------------------------------------
app = FastAPI()
# ------------------------------------------------------------------------------
# task_db: TaskDatabase = database_factory("mongo") # None
task_db: TaskDatabase = database_factory("mongo")
# async def build_database() -> TaskDatabase:
# return database_factory("mongo")
async def get_database() -> TaskDatabase:
    # !!! For some reason, trying to access the DB via the data layer
    # creates an error: "attached to a different loop".
    # Unclear why; for now the database is kept in a module-level variable in main.
# global task_db
return task_db
def pagination(
skip: int = Query(0, ge=0),
limit: int = Query(10, ge=0),
) -> Tuple[int, int]:
capped_limit = min(100, limit)
return (skip, capped_limit)
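# Example (illustrative): a request such as GET /todoer/api/v1/tasks?skip=20&limit=500
# resolves this dependency to (20, 100), since the limit is capped at 100 above.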
async def get_task_or_404(task_key: str, database=Depends(get_database)) -> Task:
try:
return await database.get(task_key)
except DataLayerException:
raise HTTPException(status_code=404, detail=f"Task {task_key} not found")
# ------------------------------------------------------------------------------
# database_builder=Depends(build_database)
@app.on_event("startup")
async def startup():
global task_db
# await
task_db = database_factory("mongo") #
@app.on_event("shutdown")
async def shutdown():
global task_db
del task_db
task_db = None
@app.get("/todoer/v1/tasks", status_code=200)
async def root(
request: Request,
database=Depends(get_database),
pagination: Tuple[int, int] = Depends(pagination),
) -> dict: # 2
"""
GET tasks as html page
"""
tasks = await database.get_all(*pagination)
return TEMPLATES.TemplateResponse(
"index.html",
{"request": request, "tasks": tasks},
)
@app.get("/todoer/api/v1/ping")
async def model_ping():
return {"ping": dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
@app.get("/todoer/api/v1/info", response_model=TodoerInfo)
async def model_info(database=Depends(get_database)) -> TodoerInfo:
logger.info(f"get info")
return TodoerInfo(
timestamp=dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
service=__service_name__,
data_source=database.db_type,
version=__version__,
)
@app.get("/todoer/api/v1/tests/{test_id}")
async def test(test_id: int, qry: Optional[str] = None):
logger.info(f"in test id={test_id} qry={qry}")
return {"test_id": test_id, "q": qry}
@app.get("/todoer/api/v1/tasks")
async def get_tasks(
pagination: Tuple[int, int] = Depends(pagination), database=Depends(get_database)
) -> List[Task]:
return await database.get_all(*pagination)
@app.get("/todoer/api/v1/tasks/{task_key}", response_model=Task)
async def get_task_id(task: Task = Depends(get_task_or_404)) -> Task:
return task
@app.post("/todoer/api/v1/tasks", status_code=201, response_model=Task)
async def create_task(task: TaskCreate, database=Depends(get_database)) -> Task:
try:
logger.info(f"request to create task in project {task.project}")
added_task = await database.add(task)
return added_task
except DataLayerException:
raise HTTPException(
status_code=409, detail=f"Adding task key {task.key} failed, already exists"
)
@app.put("/todoer/api/v1/tasks/{task_key}", response_model=Task)
async def update_task(
task_key: str, task: TaskUpdate, database=Depends(get_database)
) -> Task:
try:
logger.info(f"request to update task: {task_key}")
udp_task = await database.update(task_key, task)
return udp_task
except DataLayerException:
raise HTTPException(status_code=404, detail=f"Task {task_key} not found")
@app.patch("/todoer/api/v1/tasks/{task_key}", response_model=Task)
async def patch_task(
task_key: str, task: TaskPartialUpdate, database=Depends(get_database)
) -> Task:
try:
logger.info(f"request to patch task: {task_key}")
return await database.update(task_key, task)
except DataLayerException:
raise HTTPException(status_code=404, detail=f"Task {task_key} not found")
@app.delete("/todoer/api/v1/tasks/{task_key}", status_code=204)
async def del_task(task_key: str, database=Depends(get_database)) -> None:
try:
logger.info(f"request to delete task: {task_key}")
await database.delete(task_key)
except DataLayerException:
raise HTTPException(status_code=404, detail=f"Delete task {task_key} not found")
@app.delete("/todoer/admin/v1/tasks", status_code=204)
async def del_all_task(database=Depends(get_database)):
try:
logger.info("request to delete all tasks")
await database.delete_all()
except DataLayerException:
raise HTTPException(status_code=404, detail=f"Failed to delete all tasks")
| 30.114583 | 88 | 0.672778 | [
"MIT"
] | owlsong/todoer | todoer_api/app/main.py | 5,782 | Python |
import json
from symro.src.automenu import Command
class SpecialCommand(Command):
def __init__(self,
symbol: str,
line_index: int = -1):
super(SpecialCommand, self).__init__()
self.symbol: str = symbol
self.line_index: int = line_index
def __str__(self) -> str:
arg_tokens = []
for arg in self.get_ordered_args():
arg_tokens.append(str(arg))
for name, value in self.get_named_args().items():
if isinstance(value, list) or isinstance(value, dict):
value = json.dumps(value)
arg_tokens.append("{0}={1}".format(name, value))
arg_str = ""
if len(arg_tokens) > 0:
arg_str = "(" + ", ".join(arg_tokens) + ")"
return "@{0}{1}".format(self.symbol, arg_str)
| 27.833333 | 66 | 0.559281 | [
"MIT"
] | ari-bou/symro | src/scripting/specialcommand.py | 835 | Python |
import os
import numpy as np
from keras import backend as K
from keras.losses import mean_absolute_error
import utils
from model import wdsr_b
def psnr(hr, sr, max_val=2):
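    # PSNR = 10 * log10(max_val^2 / MSE); written with natural logs because
    # keras.backend only exposes K.log (ln), so 10 / ln(10) converts ln to log10.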
mse = K.mean(K.square(hr - sr))
return 10.0 / np.log(10) * K.log(max_val ** 2 / mse)
def data_generator(path, batch_size=8, input_shape=96, scale=2):
'''data generator for fit_generator'''
fns = os.listdir(path)
n = len(fns)
i = 0
while True:
lrs, hrs = [], []
for b in range(batch_size):
if i == 0:
np.random.shuffle(fns)
fn = fns[i]
fn = os.path.join(path, fn)
lr, hr = utils.pair(fn, input_shape, scale)
lr = utils.normalization(lr)
hr = utils.normalization(hr)
lrs.append(lr)
hrs.append(hr)
i = (i + 1) % n
lrs = np.array(lrs)
hrs = np.array(hrs)
yield lrs, hrs
model = wdsr_b()
model.compile(optimizer='adam',
loss=mean_absolute_error, metrics=[psnr])
model.fit_generator(data_generator('./datasets/train/'),
steps_per_epoch=50,
epochs=1250)
| 26.066667 | 64 | 0.55925 | [
"MIT"
] | zhaipro/keras-wdsr | src/train.py | 1,173 | Python |
""" Git Branch Merge Target Model tests """
from django.test import TestCase
from django.test import Client
from django.conf import settings
from django.utils import timezone
from app.logic.gitrepo.models.GitProjectModel import GitProjectEntry
from app.logic.gitrepo.models.GitBranchModel import GitBranchEntry
from app.logic.gitrepo.models.GitCommitModel import GitCommitEntry
from app.logic.gitrepo.models.GitUserModel import GitUserEntry
from app.logic.gitrepo.models.GitDiffModel import GitDiffEntry
from app.logic.gitrepo.models.GitBranchMergeTargetModel import GitBranchMergeTargetEntry
from datetime import timedelta
import os
import hashlib
import shutil
# Create your tests here.
class GitBranchMergeTargetTestCase(TestCase):
def setUp(self):
self.git_project1 = GitProjectEntry.objects.create(url='http://test/')
self.git_user1 = GitUserEntry.objects.create(
project=self.git_project1,
name='user1',
email='[email protected]'
)
self.git_commit1 = GitCommitEntry.objects.create(
project=self.git_project1,
commit_hash='0000100001000010000100001000010000100001',
author=self.git_user1,
author_date=timezone.now(),
committer=self.git_user1,
committer_date=timezone.now()
)
self.git_commit2 = GitCommitEntry.objects.create(
project=self.git_project1,
commit_hash='0000200002000020000200002000020000200002',
author=self.git_user1,
author_date=timezone.now(),
committer=self.git_user1,
committer_date=timezone.now()
)
self.git_branch1 = GitBranchEntry.objects.create(
project=self.git_project1,
commit=self.git_commit1,
name='branch1'
)
self.git_branch2 = GitBranchEntry.objects.create(
project=self.git_project1,
commit=self.git_commit2,
name='branch2'
)
self.git_diff1 = GitDiffEntry.objects.create(
project=self.git_project1,
commit_son=self.git_commit1,
commit_parent=self.git_commit2,
content='content-text'
)
def tearDown(self):
pass
def test_create_git_branch_merge_target_entry(self):
entry = GitBranchMergeTargetEntry.objects.create(
project=self.git_project1,
current_branch=self.git_branch1,
target_branch=self.git_branch2,
fork_point=self.git_commit2,
diff=self.git_diff1,
)
self.assertEqual('http://test/', entry.project.url)
self.assertEqual('branch1', entry.current_branch.name)
self.assertEqual('branch2', entry.target_branch.name)
self.assertEqual('0000200002000020000200002000020000200002', entry.fork_point.commit_hash)
self.assertEqual('content-text', entry.diff.content)
self.assertEqual(False, entry.invalidated)
| 34.079545 | 98 | 0.679226 | [
"MIT"
] | imvu/bluesteel | app/logic/gitrepo/tests/tests_model_GitBranchMergeTargetModel.py | 2,999 | Python |
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import os
from decimal import Decimal
import random
import time
from typing import (Optional, Sequence, Tuple, List, Set, Dict, TYPE_CHECKING,
NamedTuple, Union, Mapping, Any, Iterable)
import threading
import socket
import aiohttp
import json
from datetime import datetime, timezone
from functools import partial
from collections import defaultdict
import concurrent
from concurrent import futures
import urllib.parse
import dns.resolver
import dns.exception
from aiorpcx import run_in_thread, TaskGroup, NetAddress, ignore_after
from . import constants, util
from . import keystore
from .util import profiler, chunks
from .invoices import PR_TYPE_LN, PR_UNPAID, PR_EXPIRED, PR_PAID, PR_INFLIGHT, PR_FAILED, PR_ROUTING, LNInvoice, LN_EXPIRY_NEVER
from .util import NetworkRetryManager, JsonRPCClient
from .lnutil import LN_MAX_FUNDING_SAT
from .keystore import BIP32_KeyStore
from .ravencoin import COIN
from .ravencoin import opcodes, make_op_return, address_to_scripthash
from .transaction import Transaction
from .transaction import get_script_type_from_output_script
from .crypto import sha256
from .bip32 import BIP32Node
from .util import bh2u, bfh, InvoiceError, resolve_dns_srv, is_ip_address, log_exceptions
from .crypto import chacha20_encrypt, chacha20_decrypt
from .util import ignore_exceptions, make_aiohttp_session, SilentTaskGroup
from .util import timestamp_to_datetime, random_shuffled_copy
from .util import MyEncoder, is_private_netaddress
from .logging import Logger
from .lntransport import LNTransport, LNResponderTransport, LNTransportBase
from .lnpeer import Peer, LN_P2P_NETWORK_TIMEOUT
from .lnaddr import lnencode, LnAddr, lndecode
from .ecc import der_sig_from_sig_string
from .lnchannel import Channel, AbstractChannel
from .lnchannel import ChannelState, PeerState
from .lnrater import LNRater
from . import lnutil
from .lnutil import funding_output_script
from .ravencoin import redeem_script_to_address
from .lnutil import (Outpoint, LNPeerAddr,
get_compressed_pubkey_from_bech32, extract_nodeid,
PaymentFailure, split_host_port, ConnStringFormatError,
generate_keypair, LnKeyFamily, LOCAL, REMOTE,
MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE,
NUM_MAX_EDGES_IN_PAYMENT_PATH, SENT, RECEIVED, HTLCOwner,
UpdateAddHtlc, Direction, LnFeatures, ShortChannelID,
HtlcLog, derive_payment_secret_from_payment_preimage,
NoPathFound, InvalidGossipMsg)
from .lnutil import ln_dummy_address, ln_compare_features, IncompatibleLightningFeatures
from .lnrouter import TrampolineEdge
from .transaction import PartialTxOutput, PartialTransaction, PartialTxInput
from .lnonion import OnionFailureCode, OnionRoutingFailure
from .lnmsg import decode_msg
from .i18n import _
from .lnrouter import (RouteEdge, LNPaymentRoute, LNPaymentPath, is_route_sane_to_use,
NoChannelPolicy, LNPathInconsistent)
from .address_synchronizer import TX_HEIGHT_LOCAL
from . import lnsweep
from .lnwatcher import LNWalletWatcher
from .crypto import pw_encode_with_version_and_mac, pw_decode_with_version_and_mac
from .lnutil import ImportedChannelBackupStorage, OnchainChannelBackupStorage
from .lnchannel import ChannelBackup
from .channel_db import UpdateStatus
from .channel_db import get_mychannel_info, get_mychannel_policy
from .submarine_swaps import SwapManager
from .channel_db import ChannelInfo, Policy
from .mpp_split import suggest_splits
from .trampoline import create_trampoline_route_and_onion, TRAMPOLINE_FEES
if TYPE_CHECKING:
from .network import Network
from .wallet import Abstract_Wallet
from .channel_db import ChannelDB
from .simple_config import SimpleConfig
SAVED_PR_STATUS = [PR_PAID, PR_UNPAID] # status that are persisted
NUM_PEERS_TARGET = 4
# onchain channel backup data
CB_VERSION = 0
CB_MAGIC_BYTES = bytes([0, 0, 0, CB_VERSION])
FALLBACK_NODE_LIST_TESTNET = (
LNPeerAddr(host='203.132.95.10', port=9735, pubkey=bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),
LNPeerAddr(host='2401:d002:4402:0:bf1d:986a:7598:6d49', port=9735, pubkey=bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),
LNPeerAddr(host='50.116.3.223', port=9734, pubkey=bfh('03236a685d30096b26692dce0cf0fa7c8528bdf61dbf5363a3ef6d5c92733a3016')),
LNPeerAddr(host='3.16.119.191', port=9735, pubkey=bfh('03d5e17a3c213fe490e1b0c389f8cfcfcea08a29717d50a9f453735e0ab2a7c003')),
LNPeerAddr(host='34.250.234.192', port=9735, pubkey=bfh('03933884aaf1d6b108397e5efe5c86bcf2d8ca8d2f700eda99db9214fc2712b134')),
LNPeerAddr(host='88.99.209.230', port=9735, pubkey=bfh('0260d9119979caedc570ada883ff614c6efb93f7f7382e25d73ecbeba0b62df2d7')),
LNPeerAddr(host='160.16.233.215', port=9735, pubkey=bfh('023ea0a53af875580899da0ab0a21455d9c19160c4ea1b7774c9d4be6810b02d2c')),
LNPeerAddr(host='197.155.6.173', port=9735, pubkey=bfh('0269a94e8b32c005e4336bfb743c08a6e9beb13d940d57c479d95c8e687ccbdb9f')),
LNPeerAddr(host='2c0f:fb18:406::4', port=9735, pubkey=bfh('0269a94e8b32c005e4336bfb743c08a6e9beb13d940d57c479d95c8e687ccbdb9f')),
LNPeerAddr(host='163.172.94.64', port=9735, pubkey=bfh('030f0bf260acdbd3edcad84d7588ec7c5df4711e87e6a23016f989b8d3a4147230')),
LNPeerAddr(host='23.237.77.12', port=9735, pubkey=bfh('02312627fdf07fbdd7e5ddb136611bdde9b00d26821d14d94891395452f67af248')),
LNPeerAddr(host='197.155.6.172', port=9735, pubkey=bfh('02ae2f22b02375e3e9b4b4a2db4f12e1b50752b4062dbefd6e01332acdaf680379')),
LNPeerAddr(host='2c0f:fb18:406::3', port=9735, pubkey=bfh('02ae2f22b02375e3e9b4b4a2db4f12e1b50752b4062dbefd6e01332acdaf680379')),
LNPeerAddr(host='23.239.23.44', port=9740, pubkey=bfh('034fe52e98a0e9d3c21b767e1b371881265d8c7578c21f5afd6d6438da10348b36')),
LNPeerAddr(host='2600:3c01::f03c:91ff:fe05:349c', port=9740, pubkey=bfh('034fe52e98a0e9d3c21b767e1b371881265d8c7578c21f5afd6d6438da10348b36')),
)
FALLBACK_NODE_LIST_MAINNET = [
LNPeerAddr(host='172.81.181.3', port=9735, pubkey=bfh('0214382bdce7750dfcb8126df8e2b12de38536902dc36abcebdaeefdeca1df8284')),
LNPeerAddr(host='35.230.100.60', port=9735, pubkey=bfh('023f5e3582716bed96f6f26cfcd8037e07474d7b4743afdc8b07e692df63464d7e')),
LNPeerAddr(host='40.69.71.114', port=9735, pubkey=bfh('028303182c9885da93b3b25c9621d22cf34475e63c123942e402ab530c0556e675')),
LNPeerAddr(host='94.177.171.73', port=9735, pubkey=bfh('0276e09a267592e7451a939c932cf685f0754de382a3ca85d2fb3a864d4c365ad5')),
LNPeerAddr(host='34.236.113.58', port=9735, pubkey=bfh('02fa50c72ee1e2eb5f1b6d9c3032080c4c864373c4201dfa2966aa34eee1051f97')),
LNPeerAddr(host='52.50.244.44', port=9735, pubkey=bfh('030c3f19d742ca294a55c00376b3b355c3c90d61c6b6b39554dbc7ac19b141c14f')),
LNPeerAddr(host='157.245.68.47', port=9735, pubkey=bfh('03c2abfa93eacec04721c019644584424aab2ba4dff3ac9bdab4e9c97007491dda')),
LNPeerAddr(host='18.221.23.28', port=9735, pubkey=bfh('03abf6f44c355dec0d5aa155bdbdd6e0c8fefe318eff402de65c6eb2e1be55dc3e')),
LNPeerAddr(host='52.224.178.244', port=9735, pubkey=bfh('026b105ac13212c48714c6be9b11577a9ce10f10e1c88a45ce217e6331209faf8b')),
LNPeerAddr(host='34.239.230.56', port=9735, pubkey=bfh('03864ef025fde8fb587d989186ce6a4a186895ee44a926bfc370e2c366597a3f8f')),
LNPeerAddr(host='46.229.165.136', port=9735, pubkey=bfh('0390b5d4492dc2f5318e5233ab2cebf6d48914881a33ef6a9c6bcdbb433ad986d0')),
LNPeerAddr(host='157.230.28.160', port=9735, pubkey=bfh('0279c22ed7a068d10dc1a38ae66d2d6461e269226c60258c021b1ddcdfe4b00bc4')),
LNPeerAddr(host='74.108.13.152', port=9735, pubkey=bfh('0331f80652fb840239df8dc99205792bba2e559a05469915804c08420230e23c7c')),
LNPeerAddr(host='167.172.44.148', port=9735, pubkey=bfh('0395033b252c6f40e3756984162d68174e2bd8060a129c0d3462a9370471c6d28f')),
LNPeerAddr(host='138.68.14.104', port=9735, pubkey=bfh('03bb88ccc444534da7b5b64b4f7b15e1eccb18e102db0e400d4b9cfe93763aa26d')),
LNPeerAddr(host='3.124.63.44', port=9735, pubkey=bfh('0242a4ae0c5bef18048fbecf995094b74bfb0f7391418d71ed394784373f41e4f3')),
LNPeerAddr(host='2001:470:8:2e1::43', port=9735, pubkey=bfh('03baa70886d9200af0ffbd3f9e18d96008331c858456b16e3a9b41e735c6208fef')),
LNPeerAddr(host='2601:186:c100:6bcd:219:d1ff:fe75:dc2f', port=9735, pubkey=bfh('0298f6074a454a1f5345cb2a7c6f9fce206cd0bf675d177cdbf0ca7508dd28852f')),
LNPeerAddr(host='2001:41d0:e:734::1', port=9735, pubkey=bfh('03a503d8e30f2ff407096d235b5db63b4fcf3f89a653acb6f43d3fc492a7674019')),
LNPeerAddr(host='2a01:4f9:2b:2254::2', port=9735, pubkey=bfh('02f3069a342ae2883a6f29e275f06f28a56a6ea2e2d96f5888a3266444dcf542b6')),
LNPeerAddr(host='2a02:8070:24c1:100:528c:2997:6dbc:a054', port=9735, pubkey=bfh('02a45def9ae014fdd2603dd7033d157faa3a55a72b06a63ae22ef46d9fafdc6e8d')),
LNPeerAddr(host='2600:3c01::f03c:91ff:fe05:349c', port=9736, pubkey=bfh('02731b798b39a09f9f14e90ee601afb6ebb796d6e5797de14582a978770b33700f')),
LNPeerAddr(host='2a00:8a60:e012:a00::21', port=9735, pubkey=bfh('027ce055380348d7812d2ae7745701c9f93e70c1adeb2657f053f91df4f2843c71')),
LNPeerAddr(host='2604:a880:400:d1::8bd:1001', port=9735, pubkey=bfh('03649c72a4816f0cd546f84aafbd657e92a30ab474de7ab795e8b5650a427611f7')),
LNPeerAddr(host='2a01:4f8:c0c:7b31::1', port=9735, pubkey=bfh('02c16cca44562b590dd279c942200bdccfd4f990c3a69fad620c10ef2f8228eaff')),
LNPeerAddr(host='2001:41d0:1:b40d::1', port=9735, pubkey=bfh('026726a4b043d413b45b334876d17b8a98848129604429ec65532ba286a42efeac')),
]
from .trampoline import trampolines_by_id, hardcoded_trampoline_nodes, is_hardcoded_trampoline
class PaymentInfo(NamedTuple):
payment_hash: bytes
amount_msat: Optional[int]
direction: int
status: int
class ErrorAddingPeer(Exception): pass
# set some feature flags as baseline for both LNWallet and LNGossip
# note that e.g. DATA_LOSS_PROTECT is needed for LNGossip as many peers require it
BASE_FEATURES = LnFeatures(0)\
| LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT\
| LnFeatures.OPTION_STATIC_REMOTEKEY_OPT\
| LnFeatures.VAR_ONION_OPT\
| LnFeatures.PAYMENT_SECRET_OPT\
| LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
# we do not want to receive unrequested gossip (see lnpeer.maybe_save_remote_update)
LNWALLET_FEATURES = BASE_FEATURES\
| LnFeatures.OPTION_DATA_LOSS_PROTECT_REQ\
| LnFeatures.OPTION_STATIC_REMOTEKEY_REQ\
| LnFeatures.GOSSIP_QUERIES_REQ\
| LnFeatures.BASIC_MPP_OPT\
| LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
LNGOSSIP_FEATURES = BASE_FEATURES\
| LnFeatures.GOSSIP_QUERIES_OPT\
| LnFeatures.GOSSIP_QUERIES_REQ
class LNWorker(Logger, NetworkRetryManager[LNPeerAddr]):
INITIAL_TRAMPOLINE_FEE_LEVEL = 1 # only used for trampoline payments. set to 0 in tests.
def __init__(self, xprv, features: LnFeatures):
Logger.__init__(self)
NetworkRetryManager.__init__(
self,
max_retry_delay_normal=3600,
init_retry_delay_normal=600,
max_retry_delay_urgent=300,
init_retry_delay_urgent=4,
)
self.lock = threading.RLock()
self.node_keypair = generate_keypair(BIP32Node.from_xkey(xprv), LnKeyFamily.NODE_KEY)
self.backup_key = generate_keypair(BIP32Node.from_xkey(xprv), LnKeyFamily.BACKUP_CIPHER).privkey
self._peers = {} # type: Dict[bytes, Peer] # pubkey -> Peer # needs self.lock
self.taskgroup = SilentTaskGroup()
self.listen_server = None # type: Optional[asyncio.AbstractServer]
self.features = features
self.network = None # type: Optional[Network]
self.config = None # type: Optional[SimpleConfig]
self.stopping_soon = False # whether we are being shut down
util.register_callback(self.on_proxy_changed, ['proxy_set'])
@property
def channel_db(self):
return self.network.channel_db if self.network else None
@property
def peers(self) -> Mapping[bytes, Peer]:
"""Returns a read-only copy of peers."""
with self.lock:
return self._peers.copy()
def channels_for_peer(self, node_id: bytes) -> Dict[bytes, Channel]:
return {}
def get_node_alias(self, node_id: bytes) -> Optional[str]:
"""Returns the alias of the node, or None if unknown."""
node_alias = None
if self.channel_db:
node_info = self.channel_db.get_node_info_for_node_id(node_id)
if node_info:
node_alias = node_info.alias
else:
for k, v in hardcoded_trampoline_nodes().items():
if v.pubkey == node_id:
node_alias = k
break
return node_alias
async def maybe_listen(self):
# FIXME: only one LNWorker can listen at a time (single port)
listen_addr = self.config.get('lightning_listen')
if listen_addr:
self.logger.info(f'lightning_listen enabled. will try to bind: {listen_addr!r}')
try:
netaddr = NetAddress.from_string(listen_addr)
except Exception as e:
self.logger.error(f"failed to parse config key 'lightning_listen'. got: {e!r}")
return
addr = str(netaddr.host)
async def cb(reader, writer):
transport = LNResponderTransport(self.node_keypair.privkey, reader, writer)
try:
node_id = await transport.handshake()
except Exception as e:
self.logger.info(f'handshake failure from incoming connection: {e!r}')
return
await self._add_peer_from_transport(node_id=node_id, transport=transport)
try:
self.listen_server = await asyncio.start_server(cb, addr, netaddr.port)
except OSError as e:
self.logger.error(f"cannot listen for lightning p2p. error: {e!r}")
@ignore_exceptions # don't kill outer taskgroup
async def main_loop(self):
self.logger.info("starting taskgroup.")
try:
async with self.taskgroup as group:
await group.spawn(self._maintain_connectivity())
except asyncio.CancelledError:
raise
except Exception as e:
self.logger.exception("taskgroup died.")
finally:
self.logger.info("taskgroup stopped.")
async def _maintain_connectivity(self):
while True:
await asyncio.sleep(1)
if self.stopping_soon:
return
now = time.time()
if len(self._peers) >= NUM_PEERS_TARGET:
continue
peers = await self._get_next_peers_to_try()
for peer in peers:
if self._can_retry_addr(peer, now=now):
try:
await self._add_peer(peer.host, peer.port, peer.pubkey)
except ErrorAddingPeer as e:
self.logger.info(f"failed to add peer: {peer}. exc: {e!r}")
async def _add_peer(self, host: str, port: int, node_id: bytes) -> Peer:
if node_id in self._peers:
return self._peers[node_id]
port = int(port)
peer_addr = LNPeerAddr(host, port, node_id)
self._trying_addr_now(peer_addr)
self.logger.info(f"adding peer {peer_addr}")
if node_id == self.node_keypair.pubkey:
raise ErrorAddingPeer("cannot connect to self")
transport = LNTransport(self.node_keypair.privkey, peer_addr,
proxy=self.network.proxy)
peer = await self._add_peer_from_transport(node_id=node_id, transport=transport)
return peer
async def _add_peer_from_transport(self, *, node_id: bytes, transport: LNTransportBase) -> Peer:
peer = Peer(self, node_id, transport)
with self.lock:
existing_peer = self._peers.get(node_id)
if existing_peer:
existing_peer.close_and_cleanup()
assert node_id not in self._peers
self._peers[node_id] = peer
await self.taskgroup.spawn(peer.main_loop())
return peer
def peer_closed(self, peer: Peer) -> None:
with self.lock:
peer2 = self._peers.get(peer.pubkey)
if peer2 is peer:
self._peers.pop(peer.pubkey)
def num_peers(self) -> int:
return sum([p.is_initialized() for p in self.peers.values()])
def start_network(self, network: 'Network'):
assert network
self.network = network
self.config = network.config
self._add_peers_from_config()
asyncio.run_coroutine_threadsafe(self.main_loop(), self.network.asyncio_loop)
async def stop(self):
if self.listen_server:
self.listen_server.close()
util.unregister_callback(self.on_proxy_changed)
await self.taskgroup.cancel_remaining()
def _add_peers_from_config(self):
peer_list = self.config.get('lightning_peers', [])
for host, port, pubkey in peer_list:
asyncio.run_coroutine_threadsafe(
self._add_peer(host, int(port), bfh(pubkey)),
self.network.asyncio_loop)
def is_good_peer(self, peer: LNPeerAddr) -> bool:
# the purpose of this method is to filter peers that advertise the desired feature bits
# it is disabled for now, because feature bits published in node announcements seem to be unreliable
return True
node_id = peer.pubkey
node = self.channel_db._nodes.get(node_id)
if not node:
return False
try:
ln_compare_features(self.features, node.features)
except IncompatibleLightningFeatures:
return False
#self.logger.info(f'is_good {peer.host}')
return True
def on_peer_successfully_established(self, peer: Peer) -> None:
if isinstance(peer.transport, LNTransport):
peer_addr = peer.transport.peer_addr
# reset connection attempt count
self._on_connection_successfully_established(peer_addr)
# add into channel db
if self.channel_db:
self.channel_db.add_recent_peer(peer_addr)
# save network address into channels we might have with peer
for chan in peer.channels.values():
chan.add_or_update_peer_addr(peer_addr)
async def _get_next_peers_to_try(self) -> Sequence[LNPeerAddr]:
now = time.time()
await self.channel_db.data_loaded.wait()
# first try from recent peers
recent_peers = self.channel_db.get_recent_peers()
for peer in recent_peers:
if not peer:
continue
if peer.pubkey in self._peers:
continue
if not self._can_retry_addr(peer, now=now):
continue
if not self.is_good_peer(peer):
continue
return [peer]
# try random peer from graph
unconnected_nodes = self.channel_db.get_200_randomly_sorted_nodes_not_in(self.peers.keys())
if unconnected_nodes:
for node_id in unconnected_nodes:
addrs = self.channel_db.get_node_addresses(node_id)
if not addrs:
continue
host, port, timestamp = self.choose_preferred_address(list(addrs))
try:
peer = LNPeerAddr(host, port, node_id)
except ValueError:
continue
if not self._can_retry_addr(peer, now=now):
continue
if not self.is_good_peer(peer):
continue
#self.logger.info('taking random ln peer from our channel db')
return [peer]
# getting desperate... let's try hardcoded fallback list of peers
if constants.net in (constants.RavencoinTestnet,):
fallback_list = FALLBACK_NODE_LIST_TESTNET
elif constants.net in (constants.RavencoinMainnet,):
fallback_list = FALLBACK_NODE_LIST_MAINNET
else:
return [] # regtest??
fallback_list = [peer for peer in fallback_list if self._can_retry_addr(peer, now=now)]
if fallback_list:
return [random.choice(fallback_list)]
# last resort: try dns seeds (BOLT-10)
return await run_in_thread(self._get_peers_from_dns_seeds)
def _get_peers_from_dns_seeds(self) -> Sequence[LNPeerAddr]:
# NOTE: potentially long blocking call, do not run directly on asyncio event loop.
# Return several peers to reduce the number of dns queries.
if not constants.net.LN_DNS_SEEDS:
return []
dns_seed = random.choice(constants.net.LN_DNS_SEEDS)
self.logger.info('asking dns seed "{}" for ln peers'.format(dns_seed))
try:
# note: this might block for several seconds
# this will include bech32-encoded-pubkeys and ports
srv_answers = resolve_dns_srv('r{}.{}'.format(
constants.net.LN_REALM_BYTE, dns_seed))
except dns.exception.DNSException as e:
self.logger.info(f'failed querying (1) dns seed "{dns_seed}" for ln peers: {repr(e)}')
return []
random.shuffle(srv_answers)
num_peers = 2 * NUM_PEERS_TARGET
srv_answers = srv_answers[:num_peers]
# we now have pubkeys and ports but host is still needed
peers = []
for srv_ans in srv_answers:
try:
# note: this might block for several seconds
answers = dns.resolver.resolve(srv_ans['host'])
except dns.exception.DNSException as e:
self.logger.info(f'failed querying (2) dns seed "{dns_seed}" for ln peers: {repr(e)}')
continue
try:
ln_host = str(answers[0])
port = int(srv_ans['port'])
bech32_pubkey = srv_ans['host'].split('.')[0]
pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey)
peers.append(LNPeerAddr(ln_host, port, pubkey))
except Exception as e:
self.logger.info(f'error with parsing peer from dns seed: {repr(e)}')
continue
self.logger.info(f'got {len(peers)} ln peers from dns seed')
return peers
@staticmethod
def choose_preferred_address(addr_list: Sequence[Tuple[str, int, int]]) -> Tuple[str, int, int]:
assert len(addr_list) >= 1
# choose first one that is an IP
for host, port, timestamp in addr_list:
if is_ip_address(host):
return host, port, timestamp
# otherwise choose one at random
# TODO maybe filter out onion if not on tor?
choice = random.choice(addr_list)
return choice
def on_proxy_changed(self, event, *args):
for peer in self.peers.values():
peer.close_and_cleanup()
self._clear_addr_retry_times()
@log_exceptions
async def add_peer(self, connect_str: str) -> Peer:
node_id, rest = extract_nodeid(connect_str)
peer = self._peers.get(node_id)
if not peer:
if rest is not None:
host, port = split_host_port(rest)
else:
if not self.channel_db:
addr = trampolines_by_id().get(node_id)
if not addr:
raise ConnStringFormatError(_('Address unknown for node:') + ' ' + bh2u(node_id))
host, port = addr.host, addr.port
else:
addrs = self.channel_db.get_node_addresses(node_id)
if not addrs:
raise ConnStringFormatError(_('Don\'t know any addresses for node:') + ' ' + bh2u(node_id))
host, port, timestamp = self.choose_preferred_address(list(addrs))
port = int(port)
# Try DNS-resolving the host (if needed). This is simply so that
# the caller gets a nice exception if it cannot be resolved.
try:
await asyncio.get_event_loop().getaddrinfo(host, port)
except socket.gaierror:
raise ConnStringFormatError(_('Hostname does not resolve (getaddrinfo failed)'))
# add peer
peer = await self._add_peer(host, port, node_id)
return peer
class LNGossip(LNWorker):
max_age = 14*24*3600
LOGGING_SHORTCUT = 'g'
def __init__(self):
seed = os.urandom(32)
node = BIP32Node.from_rootseed(seed, xtype='standard')
xprv = node.to_xprv()
super().__init__(xprv, LNGOSSIP_FEATURES)
self.unknown_ids = set()
def start_network(self, network: 'Network'):
assert network
super().start_network(network)
asyncio.run_coroutine_threadsafe(self.taskgroup.spawn(self.maintain_db()), self.network.asyncio_loop)
async def maintain_db(self):
await self.channel_db.data_loaded.wait()
while True:
if len(self.unknown_ids) == 0:
self.channel_db.prune_old_policies(self.max_age)
self.channel_db.prune_orphaned_channels()
await asyncio.sleep(120)
async def add_new_ids(self, ids: Iterable[bytes]):
known = self.channel_db.get_channel_ids()
new = set(ids) - set(known)
self.unknown_ids.update(new)
util.trigger_callback('unknown_channels', len(self.unknown_ids))
util.trigger_callback('gossip_peers', self.num_peers())
util.trigger_callback('ln_gossip_sync_progress')
def get_ids_to_query(self) -> Sequence[bytes]:
N = 500
l = list(self.unknown_ids)
self.unknown_ids = set(l[N:])
util.trigger_callback('unknown_channels', len(self.unknown_ids))
util.trigger_callback('ln_gossip_sync_progress')
return l[0:N]
def get_sync_progress_estimate(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:
"""Estimates the gossip synchronization process and returns the number
of synchronized channels, the total channels in the network and a
rescaled percentage of the synchronization process."""
if self.num_peers() == 0:
return None, None, None
nchans_with_0p, nchans_with_1p, nchans_with_2p = self.channel_db.get_num_channels_partitioned_by_policy_count()
num_db_channels = nchans_with_0p + nchans_with_1p + nchans_with_2p
        # some channels will never have two policies (e.g. only one endpoint publishes gossip),
        # so if we have at least 1 policy for a channel, we consider that channel "complete" here
current_est = num_db_channels - nchans_with_0p
total_est = len(self.unknown_ids) + num_db_channels
progress = current_est / total_est if total_est and current_est else 0
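        # rescale so that a raw progress of ~95% already displays as 100%
        # (the last few percent of gossip typically trickles in very slowly)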
progress_percent = (1.0 / 0.95 * progress) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
# take a minimal number of synchronized channels to get a more accurate
# percentage estimate
if current_est < 200:
progress_percent = 0
return current_est, total_est, progress_percent
async def process_gossip(self, chan_anns, node_anns, chan_upds):
# note: we run in the originating peer's TaskGroup, so we can safely raise here
# and disconnect only from that peer
await self.channel_db.data_loaded.wait()
self.logger.debug(f'process_gossip {len(chan_anns)} {len(node_anns)} {len(chan_upds)}')
# channel announcements
def process_chan_anns():
for payload in chan_anns:
self.channel_db.verify_channel_announcement(payload)
self.channel_db.add_channel_announcements(chan_anns)
await run_in_thread(process_chan_anns)
# node announcements
def process_node_anns():
for payload in node_anns:
self.channel_db.verify_node_announcement(payload)
self.channel_db.add_node_announcements(node_anns)
await run_in_thread(process_node_anns)
# channel updates
categorized_chan_upds = await run_in_thread(partial(
self.channel_db.add_channel_updates,
chan_upds,
max_age=self.max_age))
orphaned = categorized_chan_upds.orphaned
if orphaned:
self.logger.info(f'adding {len(orphaned)} unknown channel ids')
orphaned_ids = [c['short_channel_id'] for c in orphaned]
await self.add_new_ids(orphaned_ids)
if categorized_chan_upds.good:
self.logger.debug(f'on_channel_update: {len(categorized_chan_upds.good)}/{len(chan_upds)}')
class LNWallet(LNWorker):
lnwatcher: Optional['LNWalletWatcher']
MPP_EXPIRY = 120
TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 3 # seconds
def __init__(self, wallet: 'Abstract_Wallet', xprv):
self.wallet = wallet
self.db = wallet.db
Logger.__init__(self)
LNWorker.__init__(self, xprv, LNWALLET_FEATURES)
self.config = wallet.config
self.lnwatcher = None
self.lnrater: LNRater = None
self.payments = self.db.get_dict('lightning_payments') # RHASH -> amount, direction, is_paid
self.preimages = self.db.get_dict('lightning_preimages') # RHASH -> preimage
        # note: this sweep_address is only used as a fallback, since using it may result in address reuse
self.sweep_address = wallet.get_new_sweep_address_for_channel()
self.logs = defaultdict(list) # type: Dict[str, List[HtlcLog]] # key is RHASH # (not persisted)
# used in tests
self.enable_htlc_settle = True
self.enable_htlc_forwarding = True
# note: accessing channels (besides simple lookup) needs self.lock!
self._channels = {} # type: Dict[bytes, Channel]
channels = self.db.get_dict("channels")
for channel_id, c in random_shuffled_copy(channels.items()):
self._channels[bfh(channel_id)] = Channel(c, sweep_address=self.sweep_address, lnworker=self)
self._channel_backups = {} # type: Dict[bytes, ChannelBackup]
# order is important: imported should overwrite onchain
for name in ["onchain_channel_backups", "imported_channel_backups"]:
channel_backups = self.db.get_dict(name)
for channel_id, storage in channel_backups.items():
self._channel_backups[bfh(channel_id)] = ChannelBackup(storage, sweep_address=self.sweep_address, lnworker=self)
self.sent_htlcs = defaultdict(asyncio.Queue) # type: Dict[bytes, asyncio.Queue[HtlcLog]]
        self.sent_htlcs_routes = dict() # (RHASH, scid, htlc_id) -> route, payment_secret, amount_msat, total_msat, amount_receiver_msat
self.sent_buckets = dict() # payment_secret -> (amount_sent, amount_failed)
self.received_mpp_htlcs = dict() # RHASH -> mpp_status, htlc_set
self.swap_manager = SwapManager(wallet=self.wallet, lnworker=self)
# detect inflight payments
self.inflight_payments = set() # (not persisted) keys of invoices that are in PR_INFLIGHT state
for payment_hash in self.get_payments(status='inflight').keys():
self.set_invoice_status(payment_hash.hex(), PR_INFLIGHT)
self.trampoline_forwarding_failures = {} # todo: should be persisted
def has_deterministic_node_id(self):
return bool(self.db.get('lightning_xprv'))
def has_recoverable_channels(self):
# TODO: expose use_recoverable_channels in preferences
return self.has_deterministic_node_id() \
and self.config.get('use_recoverable_channels', True) \
and not (self.config.get('lightning_listen'))
@property
def channels(self) -> Mapping[bytes, Channel]:
"""Returns a read-only copy of channels."""
with self.lock:
return self._channels.copy()
@property
def channel_backups(self) -> Mapping[bytes, ChannelBackup]:
"""Returns a read-only copy of channels."""
with self.lock:
return self._channel_backups.copy()
def get_channel_by_id(self, channel_id: bytes) -> Optional[Channel]:
return self._channels.get(channel_id, None)
def diagnostic_name(self):
return self.wallet.diagnostic_name()
@ignore_exceptions
@log_exceptions
async def sync_with_local_watchtower(self):
watchtower = self.network.local_watchtower
if watchtower:
while True:
for chan in self.channels.values():
await self.sync_channel_with_watchtower(chan, watchtower.sweepstore)
await asyncio.sleep(5)
@ignore_exceptions
@log_exceptions
async def sync_with_remote_watchtower(self):
while True:
# periodically poll if the user updated 'watchtower_url'
await asyncio.sleep(5)
watchtower_url = self.config.get('watchtower_url')
if not watchtower_url:
continue
parsed_url = urllib.parse.urlparse(watchtower_url)
if not (parsed_url.scheme == 'https' or is_private_netaddress(parsed_url.hostname)):
self.logger.warning(f"got watchtower URL for remote tower but we won't use it! "
f"can only use HTTPS (except if private IP): not using {watchtower_url!r}")
continue
# try to sync with the remote watchtower
try:
async with make_aiohttp_session(proxy=self.network.proxy) as session:
watchtower = JsonRPCClient(session, watchtower_url)
watchtower.add_method('get_ctn')
watchtower.add_method('add_sweep_tx')
for chan in self.channels.values():
await self.sync_channel_with_watchtower(chan, watchtower)
except aiohttp.client_exceptions.ClientConnectorError:
self.logger.info(f'could not contact remote watchtower {watchtower_url}')
async def sync_channel_with_watchtower(self, chan: Channel, watchtower):
outpoint = chan.funding_outpoint.to_str()
addr = chan.get_funding_address()
current_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
watchtower_ctn = await watchtower.get_ctn(outpoint, addr)
for ctn in range(watchtower_ctn + 1, current_ctn):
sweeptxs = chan.create_sweeptxs(ctn)
for tx in sweeptxs:
await watchtower.add_sweep_tx(outpoint, ctn, tx.inputs()[0].prevout.to_str(), tx.serialize())
def start_network(self, network: 'Network'):
assert network
self.network = network
self.config = network.config
self.lnwatcher = LNWalletWatcher(self, network)
self.lnwatcher.start_network(network)
self.swap_manager.start_network(network=network, lnwatcher=self.lnwatcher)
self.lnrater = LNRater(self, network)
for chan in self.channels.values():
self.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())
for cb in self.channel_backups.values():
self.lnwatcher.add_channel(cb.funding_outpoint.to_str(), cb.get_funding_address())
for coro in [
self.maybe_listen(),
self.lnwatcher.on_network_update('network_updated'), # shortcut (don't block) if funding tx locked and verified
self.reestablish_peers_and_channels(),
self.sync_with_local_watchtower(),
self.sync_with_remote_watchtower(),
]:
tg_coro = self.taskgroup.spawn(coro)
asyncio.run_coroutine_threadsafe(tg_coro, self.network.asyncio_loop)
async def stop(self):
self.stopping_soon = True
if self.listen_server: # stop accepting new peers
self.listen_server.close()
async with ignore_after(self.TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS):
await self.wait_for_received_pending_htlcs_to_get_removed()
await LNWorker.stop(self)
if self.lnwatcher:
await self.lnwatcher.stop()
self.lnwatcher = None
async def wait_for_received_pending_htlcs_to_get_removed(self):
assert self.stopping_soon is True
# We try to fail pending MPP HTLCs, and wait a bit for them to get removed.
# Note: even without MPP, if we just failed/fulfilled an HTLC, it is good
# to wait a bit for it to become irrevocably removed.
# Note: we don't wait for *all htlcs* to get removed, only for those
# that we can already fail/fulfill. e.g. forwarded htlcs cannot be removed
async with TaskGroup() as group:
for peer in self.peers.values():
await group.spawn(peer.wait_one_htlc_switch_iteration())
while True:
if all(not peer.received_htlcs_pending_removal for peer in self.peers.values()):
break
async with TaskGroup(wait=any) as group:
for peer in self.peers.values():
await group.spawn(peer.received_htlc_removed_event.wait())
def peer_closed(self, peer):
for chan in self.channels_for_peer(peer.pubkey).values():
chan.peer_state = PeerState.DISCONNECTED
util.trigger_callback('channel', self.wallet, chan)
super().peer_closed(peer)
def get_payments(self, *, status=None):
# return one item per payment_hash
# note: with AMP we will have several channels per payment
out = defaultdict(list)
for chan in self.channels.values():
d = chan.get_payments(status=status)
for k, v in d.items():
out[k] += v
return out
def get_payment_value(self, info: Optional['PaymentInfo'], plist):
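        # plist contains (chan_id, htlc, direction, status) tuples for one payment_hash;
        # amount_msat is the signed sum over all parts (SENT counts negative), so for an
        # outgoing payment fee_msat works out to (total htlc outflow) - (invoice amount)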
amount_msat = 0
fee_msat = None
for chan_id, htlc, _direction, _status in plist:
amount_msat += int(_direction) * htlc.amount_msat
if _direction == SENT and info and info.amount_msat:
fee_msat = (fee_msat or 0) - info.amount_msat - amount_msat
timestamp = min([htlc.timestamp for chan_id, htlc, _direction, _status in plist])
return amount_msat, fee_msat, timestamp
def get_lightning_history(self):
out = {}
for payment_hash, plist in self.get_payments(status='settled').items():
if len(plist) == 0:
continue
key = payment_hash.hex()
info = self.get_payment_info(payment_hash)
amount_msat, fee_msat, timestamp = self.get_payment_value(info, plist)
if info is not None:
label = self.wallet.get_label(key)
direction = ('sent' if info.direction == SENT else 'received') if len(plist)==1 else 'self-payment'
else:
direction = 'forwarding'
label = _('Forwarding')
preimage = self.get_preimage(payment_hash).hex()
item = {
'type': 'payment',
'label': label,
'timestamp': timestamp or 0,
'date': timestamp_to_datetime(timestamp),
'direction': direction,
'amount_msat': amount_msat,
'fee_msat': fee_msat,
'payment_hash': key,
'preimage': preimage,
}
# add group_id to swap transactions
swap = self.swap_manager.get_swap(payment_hash)
if swap:
if swap.is_reverse:
item['group_id'] = swap.spending_txid
item['group_label'] = 'Reverse swap' + ' ' + self.config.format_amount_and_units(swap.lightning_amount)
else:
item['group_id'] = swap.funding_txid
item['group_label'] = 'Forward swap' + ' ' + self.config.format_amount_and_units(swap.onchain_amount)
# done
out[payment_hash] = item
return out
def get_onchain_history(self):
current_height = self.wallet.get_local_height()
out = {}
# add funding events
for chan in self.channels.values():
item = chan.get_funding_height()
if item is None:
continue
if not self.lnwatcher:
continue # lnwatcher not available with --offline (its data is not persisted)
funding_txid, funding_height, funding_timestamp = item
tx_height = self.lnwatcher.get_tx_height(funding_txid)
item = {
'channel_id': bh2u(chan.channel_id),
'type': 'channel_opening',
'label': self.wallet.get_label_for_txid(funding_txid) or (_('Open channel') + ' ' + chan.get_id_for_log()),
'txid': funding_txid,
'amount_msat': chan.balance(LOCAL, ctn=0),
'direction': 'received',
'timestamp': tx_height.timestamp,
'date': timestamp_to_datetime(tx_height.timestamp),
'fee_sat': None,
'fee_msat': None,
'height': tx_height.height,
'confirmations': tx_height.conf,
}
out[funding_txid] = item
item = chan.get_closing_height()
if item is None:
continue
closing_txid, closing_height, closing_timestamp = item
tx_height = self.lnwatcher.get_tx_height(closing_txid)
item = {
'channel_id': bh2u(chan.channel_id),
'txid': closing_txid,
'label': self.wallet.get_label_for_txid(closing_txid) or (_('Close channel') + ' ' + chan.get_id_for_log()),
'type': 'channel_closure',
'amount_msat': -chan.balance_minus_outgoing_htlcs(LOCAL),
'direction': 'sent',
'timestamp': tx_height.timestamp,
'date': timestamp_to_datetime(tx_height.timestamp),
'fee_sat': None,
'fee_msat': None,
'height': tx_height.height,
'confirmations': tx_height.conf,
}
out[closing_txid] = item
# add info about submarine swaps
settled_payments = self.get_payments(status='settled')
for payment_hash_hex, swap in self.swap_manager.swaps.items():
txid = swap.spending_txid if swap.is_reverse else swap.funding_txid
if txid is None:
continue
payment_hash = bytes.fromhex(payment_hash_hex)
if payment_hash in settled_payments:
plist = settled_payments[payment_hash]
info = self.get_payment_info(payment_hash)
amount_msat, fee_msat, timestamp = self.get_payment_value(info, plist)
else:
amount_msat = 0
label = 'Reverse swap' if swap.is_reverse else 'Forward swap'
delta = current_height - swap.locktime
if not swap.is_redeemed and swap.spending_txid is None and delta < 0:
label += f' (refundable in {-delta} blocks)' # fixme: only if unspent
out[txid] = {
'txid': txid,
'group_id': txid,
'amount_msat': 0,
#'amount_msat': amount_msat, # must not be added
'type': 'swap',
'label': self.wallet.get_label_for_txid(txid) or label,
}
return out
def get_history(self):
out = list(self.get_lightning_history().values()) + list(self.get_onchain_history().values())
# sort by timestamp
out.sort(key=lambda x: (x.get('timestamp') or float("inf")))
balance_msat = 0
for item in out:
balance_msat += item['amount_msat']
item['balance_msat'] = balance_msat
return out
def channel_peers(self) -> List[bytes]:
node_ids = [chan.node_id for chan in self.channels.values() if not chan.is_closed()]
return node_ids
def channels_for_peer(self, node_id):
assert type(node_id) is bytes
return {chan_id: chan for (chan_id, chan) in self.channels.items()
if chan.node_id == node_id}
def channel_state_changed(self, chan: Channel):
if type(chan) is Channel:
self.save_channel(chan)
util.trigger_callback('channel', self.wallet, chan)
def save_channel(self, chan: Channel):
assert type(chan) is Channel
if chan.config[REMOTE].next_per_commitment_point == chan.config[REMOTE].current_per_commitment_point:
raise Exception("Tried to save channel with next_point == current_point, this should not happen")
self.wallet.save_db()
util.trigger_callback('channel', self.wallet, chan)
def channel_by_txo(self, txo: str) -> Optional[AbstractChannel]:
for chan in self.channels.values():
if chan.funding_outpoint.to_str() == txo:
return chan
for chan in self.channel_backups.values():
if chan.funding_outpoint.to_str() == txo:
return chan
async def on_channel_update(self, chan: Channel):
if type(chan) is ChannelBackup:
util.trigger_callback('channel', self.wallet, chan)
return
if chan.get_state() == ChannelState.OPEN and chan.should_be_closed_due_to_expiring_htlcs(self.network.get_local_height()):
self.logger.info(f"force-closing due to expiring htlcs")
await self.try_force_closing(chan.channel_id)
elif chan.get_state() == ChannelState.FUNDED:
peer = self._peers.get(chan.node_id)
if peer and peer.is_initialized():
peer.send_funding_locked(chan)
elif chan.get_state() == ChannelState.OPEN:
peer = self._peers.get(chan.node_id)
if peer:
await peer.maybe_update_fee(chan)
conf = self.lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
peer.on_network_update(chan, conf)
elif chan.get_state() == ChannelState.FORCE_CLOSING:
force_close_tx = chan.force_close_tx()
txid = force_close_tx.txid()
height = self.lnwatcher.get_tx_height(txid).height
if height == TX_HEIGHT_LOCAL:
self.logger.info('REBROADCASTING CLOSING TX')
await self.network.try_broadcasting(force_close_tx, 'force-close')
@log_exceptions
async def _open_channel_coroutine(
self, *,
connect_str: str,
funding_tx: PartialTransaction,
funding_sat: int,
push_sat: int,
password: Optional[str]) -> Tuple[Channel, PartialTransaction]:
peer = await self.add_peer(connect_str)
coro = peer.channel_establishment_flow(
funding_tx=funding_tx,
funding_sat=funding_sat,
push_msat=push_sat * 1000,
temp_channel_id=os.urandom(32))
chan, funding_tx = await asyncio.wait_for(coro, LN_P2P_NETWORK_TIMEOUT)
util.trigger_callback('channels_updated', self.wallet)
self.wallet.add_transaction(funding_tx) # save tx as local into the wallet
self.wallet.sign_transaction(funding_tx, password)
self.wallet.set_label(funding_tx.txid(), _('Open channel'))
if funding_tx.is_complete():
await self.network.try_broadcasting(funding_tx, 'open_channel')
return chan, funding_tx
def add_channel(self, chan: Channel):
with self.lock:
self._channels[chan.channel_id] = chan
self.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())
def add_new_channel(self, chan: Channel):
self.add_channel(chan)
channels_db = self.db.get_dict('channels')
channels_db[chan.channel_id.hex()] = chan.storage
for addr in chan.get_wallet_addresses_channel_might_want_reserved():
self.wallet.set_reserved_state_of_address(addr, reserved=True)
try:
self.save_channel(chan)
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.wallet.save_backup(backup_dir)
except:
chan.set_state(ChannelState.REDEEMED)
self.remove_channel(chan.channel_id)
raise
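    # On-chain channel backups ("recoverable channels"): cb_data() builds CB_MAGIC_BYTES plus
    # the first 16 bytes of the remote node_id; encrypt_cb_data()/decrypt_cb_data() ChaCha20-encrypt
    # that payload under backup_key, with the first 12 bytes of the funding address scripthash as nonce.
    # mktx_for_open_channel() embeds cb_data(node_id) in a zero-value OP_RETURN output when
    # has_recoverable_channels() is enabled.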
def cb_data(self, node_id):
return CB_MAGIC_BYTES + node_id[0:16]
def decrypt_cb_data(self, encrypted_data, funding_address):
funding_scripthash = bytes.fromhex(address_to_scripthash(funding_address))
nonce = funding_scripthash[0:12]
return chacha20_decrypt(key=self.backup_key, data=encrypted_data, nonce=nonce)
def encrypt_cb_data(self, data, funding_address):
funding_scripthash = bytes.fromhex(address_to_scripthash(funding_address))
nonce = funding_scripthash[0:12]
return chacha20_encrypt(key=self.backup_key, data=data, nonce=nonce)
def mktx_for_open_channel(
self, *,
coins: Sequence[PartialTxInput],
funding_sat: int,
node_id: bytes,
fee_est=None) -> PartialTransaction:
outputs = [PartialTxOutput.from_address_and_value(ln_dummy_address(), funding_sat)]
if self.has_recoverable_channels():
dummy_scriptpubkey = make_op_return(self.cb_data(node_id))
outputs.append(PartialTxOutput(scriptpubkey=dummy_scriptpubkey, value=0))
tx = self.wallet.make_unsigned_transaction(
coins=coins,
outputs=outputs,
fee=fee_est)
tx.set_rbf(False)
return tx
def open_channel(self, *, connect_str: str, funding_tx: PartialTransaction,
funding_sat: int, push_amt_sat: int, password: str = None) -> Tuple[Channel, PartialTransaction]:
if funding_sat > LN_MAX_FUNDING_SAT:
raise Exception(_("Requested channel capacity is over protocol allowed maximum."))
coro = self._open_channel_coroutine(
connect_str=connect_str, funding_tx=funding_tx, funding_sat=funding_sat,
push_sat=push_amt_sat, password=password)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
try:
chan, funding_tx = fut.result()
except concurrent.futures.TimeoutError:
raise Exception(_("open_channel timed out"))
return chan, funding_tx
def get_channel_by_short_id(self, short_channel_id: bytes) -> Optional[Channel]:
for chan in self.channels.values():
if chan.short_channel_id == short_channel_id:
return chan
def create_routes_from_invoice(self, amount_msat: int, decoded_invoice: LnAddr, *, full_path=None):
return self.create_routes_for_payment(
amount_msat=amount_msat,
final_total_msat=amount_msat,
invoice_pubkey=decoded_invoice.pubkey.serialize(),
min_cltv_expiry=decoded_invoice.get_min_final_cltv_expiry(),
r_tags=decoded_invoice.get_routing_info('r'),
invoice_features=decoded_invoice.get_features(),
trampoline_fee_level=0,
use_two_trampolines=False,
payment_hash=decoded_invoice.paymenthash,
payment_secret=decoded_invoice.payment_secret,
full_path=full_path)
@log_exceptions
async def pay_invoice(
self, invoice: str, *,
amount_msat: int = None,
attempts: int = 1,
full_path: LNPaymentPath = None) -> Tuple[bool, List[HtlcLog]]:
lnaddr = self._check_invoice(invoice, amount_msat=amount_msat)
min_cltv_expiry = lnaddr.get_min_final_cltv_expiry()
payment_hash = lnaddr.paymenthash
key = payment_hash.hex()
payment_secret = lnaddr.payment_secret
invoice_pubkey = lnaddr.pubkey.serialize()
invoice_features = lnaddr.get_features()
r_tags = lnaddr.get_routing_info('r')
amount_to_pay = lnaddr.get_amount_msat()
status = self.get_payment_status(payment_hash)
if status == PR_PAID:
raise PaymentFailure(_("This invoice has been paid already"))
if status == PR_INFLIGHT:
raise PaymentFailure(_("A payment was already initiated for this invoice"))
if payment_hash in self.get_payments(status='inflight'):
raise PaymentFailure(_("A previous attempt to pay this invoice did not clear"))
info = PaymentInfo(payment_hash, amount_to_pay, SENT, PR_UNPAID)
self.save_payment_info(info)
self.wallet.set_label(key, lnaddr.get_description())
self.set_invoice_status(key, PR_INFLIGHT)
try:
await self.pay_to_node(
node_pubkey=invoice_pubkey,
payment_hash=payment_hash,
payment_secret=payment_secret,
amount_to_pay=amount_to_pay,
min_cltv_expiry=min_cltv_expiry,
r_tags=r_tags,
invoice_features=invoice_features,
attempts=attempts,
full_path=full_path)
success = True
except PaymentFailure as e:
self.logger.info(f'payment failure: {e!r}')
success = False
reason = str(e)
if success:
self.set_invoice_status(key, PR_PAID)
util.trigger_callback('payment_succeeded', self.wallet, key)
else:
self.set_invoice_status(key, PR_UNPAID)
util.trigger_callback('payment_failed', self.wallet, key, reason)
log = self.logs[key]
return success, log
async def pay_to_node(
self, *,
node_pubkey: bytes,
payment_hash: bytes,
payment_secret: Optional[bytes],
amount_to_pay: int, # in msat
min_cltv_expiry: int,
r_tags,
invoice_features: int,
attempts: int = 1,
full_path: LNPaymentPath = None,
fwd_trampoline_onion=None,
fwd_trampoline_fee=None,
fwd_trampoline_cltv_delta=None) -> None:
if fwd_trampoline_onion:
# todo: compare to the fee of the actual route we found
if fwd_trampoline_fee < 1000:
raise OnionRoutingFailure(code=OnionFailureCode.TRAMPOLINE_FEE_INSUFFICIENT, data=b'')
if fwd_trampoline_cltv_delta < 576:
raise OnionRoutingFailure(code=OnionFailureCode.TRAMPOLINE_EXPIRY_TOO_SOON, data=b'')
self.logs[payment_hash.hex()] = log = []
trampoline_fee_level = self.INITIAL_TRAMPOLINE_FEE_LEVEL
use_two_trampolines = True # only used for pay to legacy
amount_inflight = 0 # what we sent in htlcs (that receiver gets, without fees)
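        # retry loop: (re)compute routes for whatever part of the amount is not in flight yet,
        # send the corresponding htlcs, then wait for one htlc result at a time; on failure we
        # either give up, bump the trampoline fee level, or feed the error back into the path
        # finder, and loop until the payment succeeds or `attempts` is exhausted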
while True:
amount_to_send = amount_to_pay - amount_inflight
if amount_to_send > 0:
# 1. create a set of routes for remaining amount.
# note: path-finding runs in a separate thread so that we don't block the asyncio loop
# graph updates might occur during the computation
routes = await run_in_thread(partial(
self.create_routes_for_payment,
amount_msat=amount_to_send,
final_total_msat=amount_to_pay,
invoice_pubkey=node_pubkey,
min_cltv_expiry=min_cltv_expiry,
r_tags=r_tags,
invoice_features=invoice_features,
full_path=full_path,
payment_hash=payment_hash,
payment_secret=payment_secret,
trampoline_fee_level=trampoline_fee_level,
use_two_trampolines=use_two_trampolines,
fwd_trampoline_onion=fwd_trampoline_onion))
# 2. send htlcs
for route, amount_msat, total_msat, amount_receiver_msat, cltv_delta, bucket_payment_secret, trampoline_onion in routes:
amount_inflight += amount_receiver_msat
if amount_inflight > amount_to_pay: # safety belts
raise Exception(f"amount_inflight={amount_inflight} > amount_to_pay={amount_to_pay}")
await self.pay_to_route(
route=route,
amount_msat=amount_msat,
total_msat=total_msat,
amount_receiver_msat=amount_receiver_msat,
payment_hash=payment_hash,
payment_secret=bucket_payment_secret,
min_cltv_expiry=cltv_delta,
trampoline_onion=trampoline_onion)
util.trigger_callback('invoice_status', self.wallet, payment_hash.hex())
            # 3. wait for the result of one htlc from the queue
self.logger.info(f"amount inflight {amount_inflight}")
htlc_log = await self.sent_htlcs[payment_hash].get()
amount_inflight -= htlc_log.amount_msat
if amount_inflight < 0:
raise Exception(f"amount_inflight={amount_inflight} < 0")
log.append(htlc_log)
if htlc_log.success:
# TODO: report every route to liquidity hints for mpp
# even in the case of success, we report channels of the
# route as being able to send the same amount in the future,
# as we assume to not know the capacity
if self.network.path_finder:
self.network.path_finder.update_liquidity_hints(htlc_log.route, htlc_log.amount_msat)
return
# htlc failed
if len(log) >= attempts:
                raise PaymentFailure('Giving up after %d attempts' % len(log))
# if we get a tmp channel failure, it might work to split the amount and try more routes
# if we get a channel update, we might retry the same route and amount
route = htlc_log.route
sender_idx = htlc_log.sender_idx
failure_msg = htlc_log.failure_msg
code, data = failure_msg.code, failure_msg.data
self.logger.info(f"UPDATE_FAIL_HTLC. code={repr(code)}. "
f"decoded_data={failure_msg.decode_data()}. data={data.hex()!r}")
self.logger.info(f"error reported by {bh2u(route[sender_idx].node_id)}")
if code == OnionFailureCode.MPP_TIMEOUT:
raise PaymentFailure(failure_msg.code_name())
# trampoline
if not self.channel_db:
if code == OnionFailureCode.TRAMPOLINE_FEE_INSUFFICIENT:
# todo: parse the node parameters here (not returned by eclair yet)
trampoline_fee_level += 1
continue
elif use_two_trampolines:
use_two_trampolines = False
else:
raise PaymentFailure(failure_msg.code_name())
else:
self.handle_error_code_from_failed_htlc(
route=route, sender_idx=sender_idx, failure_msg=failure_msg, amount=htlc_log.amount_msat)
async def pay_to_route(
self, *,
route: LNPaymentRoute,
amount_msat: int,
total_msat: int,
amount_receiver_msat:int,
payment_hash: bytes,
payment_secret: Optional[bytes],
min_cltv_expiry: int,
trampoline_onion: bytes = None) -> None:
# send a single htlc
short_channel_id = route[0].short_channel_id
chan = self.get_channel_by_short_id(short_channel_id)
peer = self._peers.get(route[0].node_id)
if not peer:
raise PaymentFailure('Dropped peer')
await peer.initialized
htlc = peer.pay(
route=route,
chan=chan,
amount_msat=amount_msat,
total_msat=total_msat,
payment_hash=payment_hash,
min_final_cltv_expiry=min_cltv_expiry,
payment_secret=payment_secret,
trampoline_onion=trampoline_onion)
key = (payment_hash, short_channel_id, htlc.htlc_id)
self.sent_htlcs_routes[key] = route, payment_secret, amount_msat, total_msat, amount_receiver_msat
# if we sent MPP to a trampoline, add item to sent_buckets
if not self.channel_db and amount_msat != total_msat:
if payment_secret not in self.sent_buckets:
self.sent_buckets[payment_secret] = (0, 0)
amount_sent, amount_failed = self.sent_buckets[payment_secret]
amount_sent += amount_receiver_msat
self.sent_buckets[payment_secret] = amount_sent, amount_failed
util.trigger_callback('htlc_added', chan, htlc, SENT)
def handle_error_code_from_failed_htlc(
self,
*,
route: LNPaymentRoute,
sender_idx: int,
failure_msg: OnionRoutingFailure,
amount: int) -> None:
code, data = failure_msg.code, failure_msg.data
# TODO can we use lnmsg.OnionWireSerializer here?
# TODO update onion_wire.csv
# handle some specific error codes
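        # for these codes, the value is the byte offset of the u16 length field that precedes
        # the channel_update embedded in the failure data (see BOLT-4 failure message formats)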
failure_codes = {
OnionFailureCode.TEMPORARY_CHANNEL_FAILURE: 0,
OnionFailureCode.AMOUNT_BELOW_MINIMUM: 8,
OnionFailureCode.FEE_INSUFFICIENT: 8,
OnionFailureCode.INCORRECT_CLTV_EXPIRY: 4,
OnionFailureCode.EXPIRY_TOO_SOON: 0,
OnionFailureCode.CHANNEL_DISABLED: 2,
}
# determine a fallback channel to blacklist if we don't get the erring
# channel via the payload
if sender_idx is None:
raise PaymentFailure(failure_msg.code_name())
try:
fallback_channel = route[sender_idx + 1].short_channel_id
except IndexError:
raise PaymentFailure(f'payment destination reported error: {failure_msg.code_name()}') from None
# TODO: handle unknown next peer?
# handle failure codes that include a channel update
if code in failure_codes:
offset = failure_codes[code]
channel_update_len = int.from_bytes(data[offset:offset+2], byteorder="big")
channel_update_as_received = data[offset+2: offset+2+channel_update_len]
payload = self._decode_channel_update_msg(channel_update_as_received)
if payload is None:
self.logger.info(f'could not decode channel_update for failed htlc: '
f'{channel_update_as_received.hex()}')
self.network.path_finder.liquidity_hints.add_to_blacklist(fallback_channel)
else:
# apply the channel update or get blacklisted
blacklist, update = self._handle_chanupd_from_failed_htlc(
payload, route=route, sender_idx=sender_idx)
# we interpret a temporary channel failure as a liquidity issue
# in the channel and update our liquidity hints accordingly
if code == OnionFailureCode.TEMPORARY_CHANNEL_FAILURE:
self.network.path_finder.update_liquidity_hints(
route,
amount,
failing_channel=ShortChannelID(payload['short_channel_id']))
elif blacklist:
self.network.path_finder.liquidity_hints.add_to_blacklist(
payload['short_channel_id'])
# if we can't decide on some action, we are stuck
if not (blacklist or update):
raise PaymentFailure(failure_msg.code_name())
# for errors that do not include a channel update
else:
self.network.path_finder.liquidity_hints.add_to_blacklist(fallback_channel)
def _handle_chanupd_from_failed_htlc(self, payload, *, route, sender_idx) -> Tuple[bool, bool]:
blacklist = False
update = False
try:
r = self.channel_db.add_channel_update(payload, verify=True)
except InvalidGossipMsg:
return True, False # blacklist
short_channel_id = ShortChannelID(payload['short_channel_id'])
if r == UpdateStatus.GOOD:
self.logger.info(f"applied channel update to {short_channel_id}")
# TODO: add test for this
# FIXME: this does not work for our own unannounced channels.
for chan in self.channels.values():
if chan.short_channel_id == short_channel_id:
chan.set_remote_update(payload)
update = True
elif r == UpdateStatus.ORPHANED:
# maybe it is a private channel (and data in invoice was outdated)
self.logger.info(f"Could not find {short_channel_id}. maybe update is for private channel?")
start_node_id = route[sender_idx].node_id
update = self.channel_db.add_channel_update_for_private_channel(payload, start_node_id)
blacklist = not update
elif r == UpdateStatus.EXPIRED:
blacklist = True
elif r == UpdateStatus.DEPRECATED:
            self.logger.info('channel update is not more recent than what we already have.')
blacklist = True
elif r == UpdateStatus.UNCHANGED:
blacklist = True
return blacklist, update
@classmethod
def _decode_channel_update_msg(cls, chan_upd_msg: bytes) -> Optional[Dict[str, Any]]:
channel_update_as_received = chan_upd_msg
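        # 258 is the gossip message type of 'channel_update' (BOLT-7)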
channel_update_typed = (258).to_bytes(length=2, byteorder="big") + channel_update_as_received
# note: some nodes put channel updates in error msgs with the leading msg_type already there.
# we try decoding both ways here.
try:
message_type, payload = decode_msg(channel_update_typed)
if payload['chain_hash'] != constants.net.rev_genesis_bytes(): raise Exception()
payload['raw'] = channel_update_typed
return payload
except: # FIXME: too broad
try:
message_type, payload = decode_msg(channel_update_as_received)
if payload['chain_hash'] != constants.net.rev_genesis_bytes(): raise Exception()
payload['raw'] = channel_update_as_received
return payload
except:
return None
@staticmethod
def _check_invoice(invoice: str, *, amount_msat: int = None) -> LnAddr:
addr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
if addr.is_expired():
raise InvoiceError(_("This invoice has expired"))
if amount_msat: # replace amt in invoice. main usecase is paying zero amt invoices
existing_amt_msat = addr.get_amount_msat()
if existing_amt_msat and amount_msat < existing_amt_msat:
raise Exception("cannot pay lower amt than what is originally in LN invoice")
addr.amount = Decimal(amount_msat) / COIN / 1000
if addr.amount is None:
raise InvoiceError(_("Missing amount"))
if addr.get_min_final_cltv_expiry() > lnutil.NBLOCK_CLTV_EXPIRY_TOO_FAR_INTO_FUTURE:
raise InvoiceError("{}\n{}".format(
_("Invoice wants us to risk locking funds for unreasonably long."),
f"min_final_cltv_expiry: {addr.get_min_final_cltv_expiry()}"))
return addr
def is_trampoline_peer(self, node_id: bytes) -> bool:
# until trampoline is advertised in lnfeatures, check against hardcoded list
if is_hardcoded_trampoline(node_id):
return True
peer = self._peers.get(node_id)
if peer and peer.their_features.supports(LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT):
return True
return False
def suggest_peer(self) -> Optional[bytes]:
if self.channel_db:
return self.lnrater.suggest_peer()
else:
return random.choice(list(hardcoded_trampoline_nodes().values())).pubkey
@profiler
def create_routes_for_payment(
self, *,
amount_msat: int, # part of payment amount we want routes for now
final_total_msat: int, # total payment amount final receiver will get
invoice_pubkey,
min_cltv_expiry,
r_tags,
invoice_features: int,
payment_hash,
payment_secret,
trampoline_fee_level: int,
use_two_trampolines: bool,
fwd_trampoline_onion = None,
full_path: LNPaymentPath = None) -> Sequence[Tuple[LNPaymentRoute, int]]:
"""Creates multiple routes for splitting a payment over the available
private channels.
We first try to conduct the payment over a single channel. If that fails
and mpp is supported by the receiver, we will split the payment."""
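        # each returned entry is a 7-tuple, matching how pay_to_node() unpacks it:
        #   (route, amount_msat, total_msat, amount_receiver_msat, cltv_delta,
        #    payment_secret, trampoline_onion)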
# It could happen that the pathfinding uses a channel
# in the graph multiple times, meaning we could exhaust
# its capacity. This could be dealt with by temporarily
# iteratively blacklisting channels for this mpp attempt.
invoice_features = LnFeatures(invoice_features)
trampoline_features = LnFeatures.VAR_ONION_OPT
local_height = self.network.get_local_height()
active_channels = [chan for chan in self.channels.values() if chan.is_active() and not chan.is_frozen_for_sending()]
try:
# try to send over a single channel
if not self.channel_db:
for chan in active_channels:
if not self.is_trampoline_peer(chan.node_id):
continue
if chan.node_id == invoice_pubkey:
trampoline_onion = None
trampoline_payment_secret = payment_secret
trampoline_total_msat = final_total_msat
amount_with_fees = amount_msat
cltv_delta = min_cltv_expiry
else:
trampoline_onion, amount_with_fees, cltv_delta = create_trampoline_route_and_onion(
amount_msat=amount_msat,
total_msat=final_total_msat,
min_cltv_expiry=min_cltv_expiry,
my_pubkey=self.node_keypair.pubkey,
invoice_pubkey=invoice_pubkey,
invoice_features=invoice_features,
node_id=chan.node_id,
r_tags=r_tags,
payment_hash=payment_hash,
payment_secret=payment_secret,
local_height=local_height,
trampoline_fee_level=trampoline_fee_level,
use_two_trampolines=use_two_trampolines)
trampoline_payment_secret = os.urandom(32)
trampoline_total_msat = amount_with_fees
if chan.available_to_spend(LOCAL, strict=True) < amount_with_fees:
continue
route = [
RouteEdge(
start_node=self.node_keypair.pubkey,
end_node=chan.node_id,
short_channel_id=chan.short_channel_id,
fee_base_msat=0,
fee_proportional_millionths=0,
cltv_expiry_delta=0,
node_features=trampoline_features)
]
routes = [(route, amount_with_fees, trampoline_total_msat, amount_msat, cltv_delta, trampoline_payment_secret, trampoline_onion)]
break
else:
raise NoPathFound()
else:
route = self.create_route_for_payment(
amount_msat=amount_msat,
invoice_pubkey=invoice_pubkey,
min_cltv_expiry=min_cltv_expiry,
r_tags=r_tags,
invoice_features=invoice_features,
channels=active_channels,
full_path=full_path)
routes = [(route, amount_msat, final_total_msat, amount_msat, min_cltv_expiry, payment_secret, fwd_trampoline_onion)]
except NoPathFound:
if not invoice_features.supports(LnFeatures.BASIC_MPP_OPT):
raise
channels_with_funds = {
(chan.channel_id, chan.node_id): int(chan.available_to_spend(HTLCOwner.LOCAL))
for chan in active_channels}
self.logger.info(f"channels_with_funds: {channels_with_funds}")
# for trampoline mpp payments we have to restrict ourselves to pay
# to a single node due to some incompatibility in Eclair, see:
# https://github.com/ACINQ/eclair/issues/1723
            use_single_node = not self.channel_db and constants.net is constants.RavencoinMainnet
            split_configurations = suggest_splits(amount_msat, channels_with_funds, single_node=use_single_node)
self.logger.info(f'suggest_split {amount_msat} returned {len(split_configurations)} configurations')
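            # each split configuration s is a pair: s[0] maps (channel_id, node_id) to the part
            # of the amount (in msat) to send over that channel, s[1] is a rating of the split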
for s in split_configurations:
self.logger.info(f"trying split configuration: {s[0].values()} rating: {s[1]}")
routes = []
try:
if not self.channel_db:
buckets = defaultdict(list)
for (chan_id, _), part_amount_msat in s[0].items():
chan = self.channels[chan_id]
if part_amount_msat:
buckets[chan.node_id].append((chan_id, part_amount_msat))
for node_id, bucket in buckets.items():
bucket_amount_msat = sum([x[1] for x in bucket])
trampoline_onion, bucket_amount_with_fees, bucket_cltv_delta = create_trampoline_route_and_onion(
amount_msat=bucket_amount_msat,
total_msat=final_total_msat,
min_cltv_expiry=min_cltv_expiry,
my_pubkey=self.node_keypair.pubkey,
invoice_pubkey=invoice_pubkey,
invoice_features=invoice_features,
node_id=node_id,
r_tags=r_tags,
payment_hash=payment_hash,
payment_secret=payment_secret,
local_height=local_height,
trampoline_fee_level=trampoline_fee_level,
use_two_trampolines=use_two_trampolines)
# node_features is only used to determine is_tlv
bucket_payment_secret = os.urandom(32)
bucket_fees = bucket_amount_with_fees - bucket_amount_msat
self.logger.info(f'bucket_fees {bucket_fees}')
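                            # spread the trampoline fee over the parts of this bucket:
                            # each part absorbs at most its channel's spare capacity (margin)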
for chan_id, part_amount_msat in bucket:
chan = self.channels[chan_id]
margin = chan.available_to_spend(LOCAL, strict=True) - part_amount_msat
delta_fee = min(bucket_fees, margin)
part_amount_msat_with_fees = part_amount_msat + delta_fee
bucket_fees -= delta_fee
route = [
RouteEdge(
start_node=self.node_keypair.pubkey,
end_node=node_id,
short_channel_id=chan.short_channel_id,
fee_base_msat=0,
fee_proportional_millionths=0,
cltv_expiry_delta=0,
node_features=trampoline_features)
]
self.logger.info(f'adding route {part_amount_msat} {delta_fee} {margin}')
routes.append((route, part_amount_msat_with_fees, bucket_amount_with_fees, part_amount_msat, bucket_cltv_delta, bucket_payment_secret, trampoline_onion))
if bucket_fees != 0:
self.logger.info('not enough margin to pay trampoline fee')
raise NoPathFound()
else:
for (chan_id, _), part_amount_msat in s[0].items():
if part_amount_msat:
channel = self.channels[chan_id]
route = self.create_route_for_payment(
amount_msat=part_amount_msat,
invoice_pubkey=invoice_pubkey,
min_cltv_expiry=min_cltv_expiry,
r_tags=r_tags,
invoice_features=invoice_features,
channels=[channel],
full_path=None)
routes.append((route, part_amount_msat, final_total_msat, part_amount_msat, min_cltv_expiry, payment_secret, fwd_trampoline_onion))
self.logger.info(f"found acceptable split configuration: {list(s[0].values())} rating: {s[1]}")
break
except NoPathFound:
continue
else:
raise NoPathFound()
return routes
def create_route_for_payment(
self, *,
amount_msat: int,
invoice_pubkey: bytes,
min_cltv_expiry: int,
r_tags,
invoice_features: int,
channels: List[Channel],
full_path: Optional[LNPaymentPath]) -> Tuple[LNPaymentRoute, int]:
scid_to_my_channels = {
chan.short_channel_id: chan for chan in channels
if chan.short_channel_id is not None
}
# Collect all private edges from route hints.
# Note: if some route hints are multiple edges long, and these paths cross each other,
# we allow our path finding to cross the paths; i.e. the route hints are not isolated.
private_route_edges = {} # type: Dict[ShortChannelID, RouteEdge]
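        # r_tags comes from the invoice 'r' field: a list of partial paths, where each hop is a
        # tuple (pubkey, short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta)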
for private_path in r_tags:
# we need to shift the node pubkey by one towards the destination:
private_path_nodes = [edge[0] for edge in private_path][1:] + [invoice_pubkey]
private_path_rest = [edge[1:] for edge in private_path]
start_node = private_path[0][0]
for end_node, edge_rest in zip(private_path_nodes, private_path_rest):
short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = edge_rest
short_channel_id = ShortChannelID(short_channel_id)
# if we have a routing policy for this edge in the db, that takes precedence,
# as it is likely from a previous failure
channel_policy = self.channel_db.get_policy_for_node(
short_channel_id=short_channel_id,
node_id=start_node,
my_channels=scid_to_my_channels)
if channel_policy:
fee_base_msat = channel_policy.fee_base_msat
fee_proportional_millionths = channel_policy.fee_proportional_millionths
cltv_expiry_delta = channel_policy.cltv_expiry_delta
node_info = self.channel_db.get_node_info_for_node_id(node_id=end_node)
route_edge = RouteEdge(
start_node=start_node,
end_node=end_node,
short_channel_id=short_channel_id,
fee_base_msat=fee_base_msat,
fee_proportional_millionths=fee_proportional_millionths,
cltv_expiry_delta=cltv_expiry_delta,
node_features=node_info.features if node_info else 0)
private_route_edges[route_edge.short_channel_id] = route_edge
start_node = end_node
# now find a route, end to end: between us and the recipient
try:
route = self.network.path_finder.find_route(
nodeA=self.node_keypair.pubkey,
nodeB=invoice_pubkey,
invoice_amount_msat=amount_msat,
path=full_path,
my_channels=scid_to_my_channels,
private_route_edges=private_route_edges)
except NoChannelPolicy as e:
raise NoPathFound() from e
if not route:
raise NoPathFound()
# test sanity
if not is_route_sane_to_use(route, amount_msat, min_cltv_expiry):
self.logger.info(f"rejecting insane route {route}")
raise NoPathFound()
assert len(route) > 0
if route[-1].end_node != invoice_pubkey:
raise LNPathInconsistent("last node_id != invoice pubkey")
# add features from invoice
route[-1].node_features |= invoice_features
return route
def add_request(self, amount_sat, message, expiry) -> str:
coro = self._add_request_coro(amount_sat, message, expiry)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
try:
return fut.result(timeout=5)
except concurrent.futures.TimeoutError:
raise Exception(_("add invoice timed out"))
@log_exceptions
async def create_invoice(
self, *,
amount_msat: Optional[int],
message: str,
expiry: int) -> Tuple[LnAddr, str]:
timestamp = int(time.time())
routing_hints = await self._calc_routing_hints_for_invoice(amount_msat)
if not routing_hints:
self.logger.info(
"Warning. No routing hints added to invoice. "
"Other clients will likely not be able to send to us.")
# if not all hints are trampoline, do not create trampoline invoice
invoice_features = self.features.for_invoice()
trampoline_hints = []
for r in routing_hints:
node_id, short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = r[1][0]
            if len(r[1]) == 1 and self.is_trampoline_peer(node_id):
trampoline_hints.append(('t', (node_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta)))
payment_preimage = os.urandom(32)
payment_hash = sha256(payment_preimage)
info = PaymentInfo(payment_hash, amount_msat, RECEIVED, PR_UNPAID)
amount_btc = amount_msat/Decimal(COIN*1000) if amount_msat else None
if expiry == 0:
expiry = LN_EXPIRY_NEVER
lnaddr = LnAddr(
paymenthash=payment_hash,
amount=amount_btc,
tags=[
('d', message),
('c', MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
('x', expiry),
('9', invoice_features)]
+ routing_hints
+ trampoline_hints,
date=timestamp,
payment_secret=derive_payment_secret_from_payment_preimage(payment_preimage))
invoice = lnencode(lnaddr, self.node_keypair.privkey)
self.save_preimage(payment_hash, payment_preimage)
self.save_payment_info(info)
return lnaddr, invoice
async def _add_request_coro(self, amount_sat: Optional[int], message, expiry: int) -> str:
amount_msat = amount_sat * 1000 if amount_sat is not None else None
lnaddr, invoice = await self.create_invoice(
amount_msat=amount_msat,
message=message,
expiry=expiry)
key = bh2u(lnaddr.paymenthash)
req = LNInvoice.from_bech32(invoice)
self.wallet.add_payment_request(req)
self.wallet.set_label(key, message)
return key
def save_preimage(self, payment_hash: bytes, preimage: bytes):
assert sha256(preimage) == payment_hash
self.preimages[bh2u(payment_hash)] = bh2u(preimage)
self.wallet.save_db()
def get_preimage(self, payment_hash: bytes) -> Optional[bytes]:
r = self.preimages.get(bh2u(payment_hash))
return bfh(r) if r else None
def get_payment_info(self, payment_hash: bytes) -> Optional[PaymentInfo]:
"""returns None if payment_hash is a payment we are forwarding"""
key = payment_hash.hex()
with self.lock:
if key in self.payments:
amount_msat, direction, status = self.payments[key]
return PaymentInfo(payment_hash, amount_msat, direction, status)
def save_payment_info(self, info: PaymentInfo) -> None:
key = info.payment_hash.hex()
assert info.status in SAVED_PR_STATUS
with self.lock:
self.payments[key] = info.amount_msat, info.direction, info.status
self.wallet.save_db()
def check_received_mpp_htlc(self, payment_secret, short_channel_id, htlc: UpdateAddHtlc, expected_msat: int) -> Optional[bool]:
""" return MPP status: True (accepted), False (expired) or None """
payment_hash = htlc.payment_hash
is_expired, is_accepted, htlc_set = self.received_mpp_htlcs.get(payment_secret, (False, False, set()))
if self.get_payment_status(payment_hash) == PR_PAID:
# payment_status is persisted
is_accepted = True
is_expired = False
key = (short_channel_id, htlc)
if key not in htlc_set:
htlc_set.add(key)
if not is_accepted and not is_expired:
total = sum([_htlc.amount_msat for scid, _htlc in htlc_set])
first_timestamp = min([_htlc.timestamp for scid, _htlc in htlc_set])
if self.stopping_soon:
is_expired = True # try to time out pending HTLCs before shutting down
elif time.time() - first_timestamp > self.MPP_EXPIRY:
is_expired = True
elif total == expected_msat:
is_accepted = True
if is_accepted or is_expired:
htlc_set.remove(key)
if len(htlc_set) > 0:
self.received_mpp_htlcs[payment_secret] = is_expired, is_accepted, htlc_set
elif payment_secret in self.received_mpp_htlcs:
self.received_mpp_htlcs.pop(payment_secret)
return True if is_accepted else (False if is_expired else None)
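# Illustrative walk-through of the tri-state return value above (added note,
# numbers assumed): for an invoice expecting expected_msat=3000 paid in two
# HTLC parts of 1000 and 2000 msat, the first call sees total=1000 and returns
# None (keep waiting); the second call sees total=3000, marks the set accepted
# and returns True. If the second part never arrives within MPP_EXPIRY seconds
# (or we are shutting down), a later call marks the set expired and returns
# False so the pending HTLCs can be failed back.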
def get_payment_status(self, payment_hash: bytes) -> int:
info = self.get_payment_info(payment_hash)
return info.status if info else PR_UNPAID
def get_invoice_status(self, invoice: LNInvoice) -> int:
key = invoice.rhash
log = self.logs[key]
if key in self.inflight_payments:
return PR_INFLIGHT
# status may be PR_FAILED
status = self.get_payment_status(bfh(key))
if status == PR_UNPAID and log:
status = PR_FAILED
return status
def set_invoice_status(self, key: str, status: int) -> None:
if status == PR_INFLIGHT:
self.inflight_payments.add(key)
elif key in self.inflight_payments:
self.inflight_payments.remove(key)
if status in SAVED_PR_STATUS:
self.set_payment_status(bfh(key), status)
util.trigger_callback('invoice_status', self.wallet, key)
def set_request_status(self, payment_hash: bytes, status: int) -> None:
if self.get_payment_status(payment_hash) != status:
self.set_payment_status(payment_hash, status)
util.trigger_callback('request_status', self.wallet, payment_hash.hex(), status)
def set_payment_status(self, payment_hash: bytes, status: int) -> None:
info = self.get_payment_info(payment_hash)
if info is None:
# if we are forwarding
return
info = info._replace(status=status)
self.save_payment_info(info)
def htlc_fulfilled(self, chan, payment_hash: bytes, htlc_id:int):
util.trigger_callback('htlc_fulfilled', payment_hash, chan.channel_id)
q = self.sent_htlcs.get(payment_hash)
if q:
route, payment_secret, amount_msat, bucket_msat, amount_receiver_msat = self.sent_htlcs_routes[(payment_hash, chan.short_channel_id, htlc_id)]
htlc_log = HtlcLog(
success=True,
route=route,
amount_msat=amount_receiver_msat)
q.put_nowait(htlc_log)
else:
key = payment_hash.hex()
self.set_invoice_status(key, PR_PAID)
util.trigger_callback('payment_succeeded', self.wallet, key)
def htlc_failed(
self,
chan: Channel,
payment_hash: bytes,
htlc_id: int,
error_bytes: Optional[bytes],
failure_message: Optional['OnionRoutingFailure']):
util.trigger_callback('htlc_failed', payment_hash, chan.channel_id)
q = self.sent_htlcs.get(payment_hash)
if q:
# detect if it is part of a bucket
# if yes, wait until the bucket completely failed
key = (payment_hash, chan.short_channel_id, htlc_id)
route, payment_secret, amount_msat, bucket_msat, amount_receiver_msat = self.sent_htlcs_routes[key]
if error_bytes:
# TODO "decode_onion_error" might raise, catch and maybe blacklist/penalise someone?
try:
failure_message, sender_idx = chan.decode_onion_error(error_bytes, route, htlc_id)
except Exception as e:
sender_idx = None
failure_message = OnionRoutingFailure(-1, str(e))
else:
# probably got "update_fail_malformed_htlc". well... who to penalise now?
assert failure_message is not None
sender_idx = None
self.logger.info(f"htlc_failed {failure_message}")
# check sent_buckets if we use trampoline
if not self.channel_db and payment_secret in self.sent_buckets:
amount_sent, amount_failed = self.sent_buckets[payment_secret]
amount_failed += amount_receiver_msat
self.sent_buckets[payment_secret] = amount_sent, amount_failed
if amount_sent != amount_failed:
self.logger.info('bucket still active...')
return
self.logger.info('bucket failed')
amount_receiver_msat = amount_sent
htlc_log = HtlcLog(
success=False,
route=route,
amount_msat=amount_receiver_msat,
error_bytes=error_bytes,
failure_msg=failure_message,
sender_idx=sender_idx)
q.put_nowait(htlc_log)
else:
self.logger.info(f"received unknown htlc_failed, probably from previous session")
key = payment_hash.hex()
self.set_invoice_status(key, PR_UNPAID)
util.trigger_callback('payment_failed', self.wallet, key, '')
async def _calc_routing_hints_for_invoice(self, amount_msat: Optional[int]):
"""calculate routing hints (BOLT-11 'r' field)"""
routing_hints = []
channels = list(self.channels.values())
# do minimal filtering of channels.
# we include channels that cannot *right now* receive (e.g. peer disconnected or balance insufficient)
channels = [chan for chan in channels
if (chan.is_open() and not chan.is_frozen_for_receiving())]
# cap max channels to include to keep QR code reasonably scannable
channels = sorted(channels, key=lambda chan: (not chan.is_active(), -chan.available_to_spend(REMOTE)))
channels = channels[:15]
random.shuffle(channels) # let's not leak channel order
scid_to_my_channels = {chan.short_channel_id: chan for chan in channels
if chan.short_channel_id is not None}
for chan in channels:
chan_id = chan.short_channel_id
assert isinstance(chan_id, bytes), chan_id
channel_info = get_mychannel_info(chan_id, scid_to_my_channels)
# note: as a fallback, if we don't have a channel update for the
# incoming direction of our private channel, we fill the invoice with garbage.
# the sender should still be able to pay us, but will incur an extra round trip
# (they will get the channel update from the onion error)
# at least, that's the theory. https://github.com/lightningnetwork/lnd/issues/2066
fee_base_msat = fee_proportional_millionths = 0
cltv_expiry_delta = 1 # lnd won't even try with zero
missing_info = True
if channel_info:
policy = get_mychannel_policy(channel_info.short_channel_id, chan.node_id, scid_to_my_channels)
if policy:
fee_base_msat = policy.fee_base_msat
fee_proportional_millionths = policy.fee_proportional_millionths
cltv_expiry_delta = policy.cltv_expiry_delta
missing_info = False
if missing_info:
self.logger.info(
f"Warning. Missing channel update for our channel {chan_id}; "
f"filling invoice with incorrect data.")
routing_hints.append(('r', [(
chan.node_id,
chan_id,
fee_base_msat,
fee_proportional_millionths,
cltv_expiry_delta)]))
return routing_hints
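# Example of the shape produced above (illustrative values only): each hint is
# ('r', [(node_id_bytes, short_channel_id_bytes, fee_base_msat,
#         fee_proportional_millionths, cltv_expiry_delta)]),
# e.g. ('r', [(b'\x02...', b'\x00...', 1000, 100, 144)]) -- one single-hop hint
# per selected channel, later serialised into the BOLT-11 'r' field by lnencode.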
def delete_payment(self, payment_hash_hex: str):
try:
with self.lock:
del self.payments[payment_hash_hex]
except KeyError:
return
self.wallet.save_db()
def get_balance(self):
with self.lock:
return Decimal(sum(
chan.balance(LOCAL) if not chan.is_closed() else 0
for chan in self.channels.values())) / 1000
def num_sats_can_send(self) -> Decimal:
can_send = 0
with self.lock:
if self.channels:
for c in self.channels.values():
if c.is_active() and not c.is_frozen_for_sending():
can_send += c.available_to_spend(LOCAL)
# Here we have to guess a fee, because some callers (submarine swaps)
# use this method to initiate a payment, which would otherwise fail.
fee_base_msat = TRAMPOLINE_FEES[3]['fee_base_msat']
fee_proportional_millionths = TRAMPOLINE_FEES[3]['fee_proportional_millionths']
# inverse of fee_for_edge_msat
can_send_minus_fees = (can_send - fee_base_msat) * 1_000_000 // (1_000_000 + fee_proportional_millionths)
can_send_minus_fees = max(0, can_send_minus_fees)
return Decimal(can_send_minus_fees) / 1000
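# Worked example of the fee inversion above (assumed fee parameters, not the
# real TRAMPOLINE_FEES values): with fee_base_msat=1000,
# fee_proportional_millionths=1000 and can_send=1_001_000 msat,
# (1_001_000 - 1000) * 1_000_000 // 1_001_000 = 999_000 msat is spendable;
# forwarding 999_000 msat then costs 1000 + 999_000 * 1000 // 1_000_000
# = 1_999 msat in fees, i.e. 1_000_999 msat total, which stays within can_send.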
def num_sats_can_receive(self) -> Decimal:
with self.lock:
channels = [
c for c in self.channels.values()
if c.is_active() and not c.is_frozen_for_receiving()
]
can_receive = sum([c.available_to_spend(REMOTE) for c in channels]) if channels else 0
return Decimal(can_receive) / 1000
def num_sats_can_receive_no_mpp(self) -> Decimal:
with self.lock:
channels = [
c for c in self.channels.values()
if c.is_active() and not c.is_frozen_for_receiving()
]
can_receive = max([c.available_to_spend(REMOTE) for c in channels]) if channels else 0
return Decimal(can_receive) / 1000
def can_pay_invoice(self, invoice: LNInvoice) -> bool:
return invoice.get_amount_sat() <= self.num_sats_can_send()
def can_receive_invoice(self, invoice: LNInvoice) -> bool:
return invoice.get_amount_sat() <= self.num_sats_can_receive()
async def close_channel(self, chan_id):
chan = self._channels[chan_id]
peer = self._peers[chan.node_id]
return await peer.close_channel(chan_id)
async def force_close_channel(self, chan_id):
# returns txid or raises
chan = self._channels[chan_id]
tx = chan.force_close_tx()
await self.network.broadcast_transaction(tx)
chan.set_state(ChannelState.FORCE_CLOSING)
return tx.txid()
async def try_force_closing(self, chan_id):
# fails silently but sets the state, so that we will retry later
chan = self._channels[chan_id]
tx = chan.force_close_tx()
chan.set_state(ChannelState.FORCE_CLOSING)
await self.network.try_broadcasting(tx, 'force-close')
def remove_channel(self, chan_id):
chan = self.channels[chan_id]
assert chan.can_be_deleted()
with self.lock:
self._channels.pop(chan_id)
self.db.get('channels').pop(chan_id.hex())
for addr in chan.get_wallet_addresses_channel_might_want_reserved():
self.wallet.set_reserved_state_of_address(addr, reserved=False)
util.trigger_callback('channels_updated', self.wallet)
util.trigger_callback('wallet_updated', self.wallet)
@ignore_exceptions
@log_exceptions
async def reestablish_peer_for_given_channel(self, chan: Channel) -> None:
now = time.time()
peer_addresses = []
if not self.channel_db:
addr = trampolines_by_id().get(chan.node_id)
if addr:
peer_addresses.append(addr)
else:
# will try last good address first, from gossip
last_good_addr = self.channel_db.get_last_good_address(chan.node_id)
if last_good_addr:
peer_addresses.append(last_good_addr)
# will try addresses for node_id from gossip
addrs_from_gossip = self.channel_db.get_node_addresses(chan.node_id) or []
for host, port, ts in addrs_from_gossip:
peer_addresses.append(LNPeerAddr(host, port, chan.node_id))
# will try addresses stored in channel storage
peer_addresses += list(chan.get_peer_addresses())
# Done gathering addresses.
# Now select first one that has not failed recently.
for peer in peer_addresses:
if self._can_retry_addr(peer, urgent=True, now=now):
await self._add_peer(peer.host, peer.port, peer.pubkey)
return
async def reestablish_peers_and_channels(self):
while True:
await asyncio.sleep(1)
if self.stopping_soon:
return
for chan in self.channels.values():
if chan.is_closed():
continue
# reestablish
if not chan.should_try_to_reestablish_peer():
continue
peer = self._peers.get(chan.node_id, None)
if peer:
await peer.taskgroup.spawn(peer.reestablish_channel(chan))
else:
await self.taskgroup.spawn(self.reestablish_peer_for_given_channel(chan))
def current_feerate_per_kw(self):
from .simple_config import FEE_LN_ETA_TARGET, FEERATE_FALLBACK_STATIC_FEE, FEERATE_REGTEST_HARDCODED
if constants.net is constants.BitcoinRegtest:
return FEERATE_REGTEST_HARDCODED // 4
feerate_per_kvbyte = self.network.config.eta_target_to_fee(FEE_LN_ETA_TARGET)
if feerate_per_kvbyte is None:
feerate_per_kvbyte = FEERATE_FALLBACK_STATIC_FEE
return max(253, feerate_per_kvbyte // 4)
def create_channel_backup(self, channel_id):
chan = self._channels[channel_id]
# do not backup old-style channels
assert chan.is_static_remotekey_enabled()
peer_addresses = list(chan.get_peer_addresses())
peer_addr = peer_addresses[0]
return ImportedChannelBackupStorage(
node_id = chan.node_id,
privkey = self.node_keypair.privkey,
funding_txid = chan.funding_outpoint.txid,
funding_index = chan.funding_outpoint.output_index,
funding_address = chan.get_funding_address(),
host = peer_addr.host,
port = peer_addr.port,
is_initiator = chan.constraints.is_initiator,
channel_seed = chan.config[LOCAL].channel_seed,
local_delay = chan.config[LOCAL].to_self_delay,
remote_delay = chan.config[REMOTE].to_self_delay,
remote_revocation_pubkey = chan.config[REMOTE].revocation_basepoint.pubkey,
remote_payment_pubkey = chan.config[REMOTE].payment_basepoint.pubkey)
def export_channel_backup(self, channel_id):
xpub = self.wallet.get_fingerprint()
backup_bytes = self.create_channel_backup(channel_id).to_bytes()
assert backup_bytes == ImportedChannelBackupStorage.from_bytes(backup_bytes).to_bytes(), "roundtrip failed"
encrypted = pw_encode_with_version_and_mac(backup_bytes, xpub)
assert backup_bytes == pw_decode_with_version_and_mac(encrypted, xpub), "encrypt failed"
return 'channel_backup:' + encrypted
async def request_force_close(self, channel_id: bytes, *, connect_str=None) -> None:
if channel_id in self.channels:
chan = self.channels[channel_id]
peer = self._peers.get(chan.node_id)
if not peer:
raise Exception('Peer not found')
chan.should_request_force_close = True
peer.close_and_cleanup()
elif connect_str:
peer = await self.add_peer(connect_str)
await peer.trigger_force_close(channel_id)
elif channel_id in self.channel_backups:
await self._request_force_close_from_backup(channel_id)
else:
raise Exception(f'Unknown channel {channel_id.hex()}')
def import_channel_backup(self, data):
assert data.startswith('channel_backup:')
encrypted = data[15:]
xpub = self.wallet.get_fingerprint()
decrypted = pw_decode_with_version_and_mac(encrypted, xpub)
cb_storage = ImportedChannelBackupStorage.from_bytes(decrypted)
channel_id = cb_storage.channel_id()
if channel_id.hex() in self.db.get_dict("channels"):
raise Exception('Channel already in wallet')
self.logger.info(f'importing channel backup: {channel_id.hex()}')
d = self.db.get_dict("imported_channel_backups")
d[channel_id.hex()] = cb_storage
with self.lock:
cb = ChannelBackup(cb_storage, sweep_address=self.sweep_address, lnworker=self)
self._channel_backups[channel_id] = cb
self.wallet.save_db()
util.trigger_callback('channels_updated', self.wallet)
self.lnwatcher.add_channel(cb.funding_outpoint.to_str(), cb.get_funding_address())
def has_conflicting_backup_with(self, remote_node_id: bytes):
""" Returns whether we have an active channel with this node on another device, using same local node id. """
channel_backup_peers = [
cb.node_id for cb in self.channel_backups.values()
if (not cb.is_closed() and cb.get_local_pubkey() == self.node_keypair.pubkey)]
return any(remote_node_id.startswith(cb_peer_nodeid) for cb_peer_nodeid in channel_backup_peers)
def remove_channel_backup(self, channel_id):
chan = self.channel_backups[channel_id]
assert chan.can_be_deleted()
onchain_backups = self.db.get_dict("onchain_channel_backups")
imported_backups = self.db.get_dict("imported_channel_backups")
if channel_id.hex() in onchain_backups:
onchain_backups.pop(channel_id.hex())
elif channel_id.hex() in imported_backups:
imported_backups.pop(channel_id.hex())
else:
raise Exception('Channel not found')
with self.lock:
self._channel_backups.pop(channel_id)
self.wallet.save_db()
util.trigger_callback('channels_updated', self.wallet)
@log_exceptions
async def _request_force_close_from_backup(self, channel_id: bytes):
cb = self.channel_backups.get(channel_id)
if not cb:
raise Exception(f'channel backup not found {self.channel_backups}')
cb = cb.cb # storage
self.logger.info(f'requesting channel force close: {channel_id.hex()}')
if isinstance(cb, ImportedChannelBackupStorage):
node_id = cb.node_id
privkey = cb.privkey
addresses = [(cb.host, cb.port, 0)]
# TODO also try network addresses from gossip db (as it might have changed)
else:
assert isinstance(cb, OnchainChannelBackupStorage)
if not self.channel_db:
raise Exception('Enable gossip first')
node_id = self.network.channel_db.get_node_by_prefix(cb.node_id_prefix)
privkey = self.node_keypair.privkey
addresses = self.network.channel_db.get_node_addresses(node_id)
if not addresses:
raise Exception('Peer not found in gossip database')
for host, port, timestamp in addresses:
peer_addr = LNPeerAddr(host, port, node_id)
transport = LNTransport(privkey, peer_addr, proxy=self.network.proxy)
peer = Peer(self, node_id, transport, is_channel_backup=True)
try:
async with TaskGroup(wait=any) as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.trigger_force_close(channel_id))
return
except Exception as e:
self.logger.info(f'failed to connect {host} {e}')
continue
# TODO close/cleanup the transport
else:
raise Exception('failed to connect')
def maybe_add_backup_from_tx(self, tx):
funding_address = None
node_id_prefix = None
for i, o in enumerate(tx.outputs()):
script_type = get_script_type_from_output_script(o.scriptpubkey)
if script_type == 'p2wsh':
funding_index = i
funding_address = o.address
for o2 in tx.outputs():
if o2.scriptpubkey.startswith(bytes([opcodes.OP_RETURN])):
encrypted_data = o2.scriptpubkey[2:]
data = self.decrypt_cb_data(encrypted_data, funding_address)
if data.startswith(CB_MAGIC_BYTES):
node_id_prefix = data[4:]
if node_id_prefix is None:
return
funding_txid = tx.txid()
cb_storage = OnchainChannelBackupStorage(
node_id_prefix = node_id_prefix,
funding_txid = funding_txid,
funding_index = funding_index,
funding_address = funding_address,
is_initiator = True)
channel_id = cb_storage.channel_id().hex()
if channel_id in self.db.get_dict("channels"):
return
self.logger.info(f"adding backup from tx")
d = self.db.get_dict("onchain_channel_backups")
d[channel_id] = cb_storage
cb = ChannelBackup(cb_storage, sweep_address=self.sweep_address, lnworker=self)
self.wallet.save_db()
with self.lock:
self._channel_backups[bfh(channel_id)] = cb
util.trigger_callback('channels_updated', self.wallet)
self.lnwatcher.add_channel(cb.funding_outpoint.to_str(), cb.get_funding_address())
| 48.687807 | 185 | 0.632389 | ["MIT"] | jeroz1/electrum-ravencoin-utd | electrum/lnworker.py | 109,012 | Python
# **********************************************************************************************************************
# **********************************************************************************************************************
# **********************************************************************************************************************
# *** Using Reinforcement Learning for Load Testing of Video Games ***
# *** Game: CartPole ***
# *** RL-baseline: Cross Entropy Method ***
# *** Play 1000 episodes (still training) and save injected bugs spotted ***
# **********************************************************************************************************************
# **********************************************************************************************************************
# **********************************************************************************************************************
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import namedtuple
HIDDEN_SIZE = 128 # neural network size
BATCH_SIZE = 16 # num episodes
PERCENTILE = 70 # elite episodes
class Net(nn.Module):
def __init__(self, obs_size, hidden_size, n_actions):
super(Net, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, n_actions)
)
def forward(self, x):
return self.net(x)
Episode = namedtuple('Episode', field_names=['reward', 'steps'])
EpisodeStep = namedtuple('EpisodeStep', field_names=['observation', 'action'])
def iterate_batches(env, net, batch_size, file):
batch = []
episode_reward = 0.0
episode_steps = []
obs = env.reset()
# OBSERVATION (CartPole-v0 state vector):
# - cart position (x coordinate)
# - cart velocity
# - pole angle
# - pole angular velocity
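# (Added note, not in the original script: only next_obs[0], the position
#  component, is used by the injected-bug checks below; each check fires at
#  most once per episode, when the position falls between -0.5 and -0.45 or
#  between 0.45 and 0.5.)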
sm = nn.Softmax(dim=1)
flag_injected_bug_spotted = [False, False]
while True:
obs_v = torch.FloatTensor([obs])
act_probs_v = sm(net(obs_v))
act_probs = act_probs_v.data.numpy()[0]
action = np.random.choice(len(act_probs), p=act_probs)
next_obs, reward, is_done, _ = env.step(action)
if -0.5 < next_obs[0] < -0.45 and not flag_injected_bug_spotted[0]: # and -0.01 < next_obs[2] < 0.00:
file.write('BUG1 ')
flag_injected_bug_spotted[0] = True
if 0.45 < next_obs[0] < 0.5 and not flag_injected_bug_spotted[1]: # and -0.01 < next_obs[2] < 0.00:
file.write('BUG2 ')
flag_injected_bug_spotted[1] = True
episode_reward += reward
episode_steps.append(EpisodeStep(observation=obs, action=action))
if is_done:
file.write('\n')
batch.append(Episode(reward=episode_reward, steps=episode_steps))
episode_reward = 0.0
episode_steps = []
next_obs = env.reset()
flag_injected_bug_spotted = [False, False]
if len(batch) == batch_size:
yield batch
batch = []
obs = next_obs
def filter_batch(batch, percentile):
rewards = list(map(lambda s: s.reward, batch))
reward_bound = np.percentile(rewards, percentile)
reward_mean = float(np.mean(rewards))
train_obs = []
train_act = []
for example in batch:
if example.reward < reward_bound:
continue
train_obs.extend(map(lambda step: step.observation, example.steps))
train_act.extend(map(lambda step: step.action, example.steps))
train_obs_v = torch.FloatTensor(train_obs)
train_act_v = torch.LongTensor(train_act)
return train_obs_v, train_act_v, reward_bound, reward_mean
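# Illustrative example of the elite selection above (assumed rewards): if a
# batch of 16 episodes has rewards ranging from 20 to 200, reward_bound is the
# 70th percentile of those rewards, so only roughly the top 30% of episodes
# (those with reward >= reward_bound) contribute their (observation, action)
# pairs to train_obs_v / train_act_v -- the "elite" episodes the cross-entropy
# method imitates on the next training step.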
# **********************************************************************************************************************
# * 1000 episodes start *
# **********************************************************************************************************************
if __name__ == "__main__":
print('\n\n*****************************************************************')
print("* RL-baseline model's playing 1000 episodes (still training)... *")
print('*****************************************************************\n')
env = gym.make("CartPole-v0")
env._max_episode_steps = 1000 # episode length
obs_size = env.observation_space.shape[0]
n_actions = env.action_space.n
net = Net(obs_size, HIDDEN_SIZE, n_actions)
net.load_state_dict(torch.load('./model_rl-baseline'))
net.eval()
objective = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=net.parameters(), lr=0.01)
filename = 'injected_bugs_spotted_RL-baseline.txt'
f = open(filename, 'w+')
for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE, f)):
obs_v, acts_v, reward_b, reward_m = filter_batch(batch, PERCENTILE)
optimizer.zero_grad()
action_scores_v = net(obs_v)
loss_v = objective(action_scores_v, acts_v)
loss_v.backward()
optimizer.step()
print("%d: loss=%.3f, reward_mean=%.1f, reward_bound=%.1f" % (iter_no, loss_v.item(), reward_m, reward_b))
if iter_no == 63: # 63 * 16 (batch size) = 1008 episodes
print('1k episodes end\n\n')
break
f.close()
lines = [line for line in open(filename, 'r')]
lines_1k = lines[:1000]
count_0bug = 0
count_1bug = 0
count_2bug = 0
for line in lines_1k:
if line.strip() == '':
count_0bug += 1
elif len(line.strip().split()) == 1:
count_1bug += 1
elif len(line.strip().split()) == 2:
count_2bug += 1
print('Report injected bugs spotted:')
print('0 injected bug spotted in %d episodes' % count_0bug)
print('1 injected bug spotted in %d episodes' % count_1bug)
print('2 injected bugs spotted in %d episodes' % count_2bug)
print("\ /\ \n ) ( ') meow!\n( / )\n \(__)|")
# \ /\
# ) ( ')
# ( / )
# \(__)|
| 42.9875 | 120 | 0.447223 | ["MIT"] | RosaliaTufano/rlgameauthors | CartPole/CartPole_RL-baseline_1k_episodes.py | 6,878 | Python
from .base import *
from .visualization import *
from .guided_backprop import *
from .occlusion import *
from .integrated_gradients import *
from .grad_cam import *
from .xrai import *
from .blur_ig import *
| 23.111111 | 35 | 0.769231 | ["Apache-2.0"] | aliabd/history-of-interpretation | saliency/__init__.py | 208 | Python
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""SQLAlchemy storage backend."""
import threading
from oslo_log import log
from oslo_config import cfg
from oslo_utils import uuidutils
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils as db_utils
from oslo_db.sqlalchemy import session as db_session
from sqlalchemy import asc, desc, or_
from sqlalchemy.orm.exc import NoResultFound
from fm.api import config
from fm.common import constants
from fm.common import exceptions
from fm.common import utils
from fm.db import api
from fm.db.sqlalchemy import models
from fm import objects
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_LOCK = threading.Lock()
_FACADE = None
context_manager = enginefacade.transaction_context()
context_manager.configure()
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database)
)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
"""The backend is this module itself."""
return Connection()
def _session_for_read():
_context = threading.local()
return enginefacade.reader.using(_context)
def _session_for_write():
_context = threading.local()
LOG.debug("_session_for_write CONTEXT=%s" % _context)
return enginefacade.writer.using(_context)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
query = model_query(model)
if not sort_key:
sort_keys = []
elif not isinstance(sort_key, list):
sort_keys = [sort_key]
else:
sort_keys = sort_key
if 'id' not in sort_keys:
sort_keys.append('id')
query = db_utils.paginate_query(query, model, limit, sort_keys,
marker=marker, sort_dir=sort_dir)
return query.all()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
with _session_for_read() as session:
query = session.query(model, *args)
return query
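# Illustrative usage (mirrors how the helpers below call it; `some_uuid` is a
# placeholder):
#   query = model_query(models.Alarm).filter_by(uuid=some_uuid)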
def add_event_log_filter_by_event_suppression(query, include_suppress):
"""Adds an event_suppression filter to a query.
Filters results by suppression status
:param query: Initial query to add filter to.
:param include_suppress: Value for filtering results by.
:return: Modified query.
"""
query = query.outerjoin(models.EventSuppression,
models.EventLog.event_log_id == models.EventSuppression.alarm_id)
query = query.add_columns(models.EventSuppression.suppression_status)
if include_suppress:
return query
return query.filter(or_(models.EventLog.state == 'log',
models.EventSuppression.suppression_status ==
constants.FM_UNSUPPRESSED))
def add_alarm_filter_by_event_suppression(query, include_suppress):
"""Adds an event_suppression filter to a query.
Filters results by suppression status
:param query: Initial query to add filter to.
:param include_suppress: Value for filtering results by.
:return: Modified query.
"""
query = query.join(models.EventSuppression,
models.Alarm.alarm_id == models.EventSuppression.alarm_id)
query = query.add_columns(models.EventSuppression.suppression_status)
if include_suppress:
return query
return query.filter(models.EventSuppression.suppression_status ==
constants.FM_UNSUPPRESSED)
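# Clarifying note (added, not in the original source): because of add_columns()
# each result row of the returned query is a (Alarm, suppression_status) tuple
# rather than a bare Alarm object; the event-log variant above behaves the same.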
def add_alarm_mgmt_affecting_by_event_suppression(query):
"""Adds a mgmt_affecting attribute from event_suppression to query.
:param query: Initial query.
:return: Modified query.
"""
query = query.add_columns(models.EventSuppression.mgmt_affecting)
return query
def add_alarm_degrade_affecting_by_event_suppression(query):
"""Adds a degrade_affecting attribute from event_suppression to query.
:param query: Initial query.
:return: Modified query.
"""
query = query.add_columns(models.EventSuppression.degrade_affecting)
return query
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
pass
def get_session(self, autocommit=True):
return get_session(autocommit=autocommit)
def alarm_create(self, values):
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
alarm = models.Alarm()
alarm.update(values)
with _session_for_write() as session:
try:
session.add(alarm)
session.flush()
except db_exc.DBDuplicateEntry:
raise exceptions.AlarmAlreadyExists(uuid=values['uuid'])
return alarm
@objects.objectify(objects.alarm)
def alarm_get(self, uuid):
query = model_query(models.Alarm)
if uuid:
query = query.filter_by(uuid=uuid)
query = add_alarm_filter_by_event_suppression(query, include_suppress=True)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
try:
result = query.one()
except NoResultFound:
raise exceptions.AlarmNotFound(alarm=uuid)
return result
def alarm_get_by_ids(self, alarm_id, entity_instance_id):
query = model_query(models.Alarm)
if alarm_id and entity_instance_id:
query = query.filter_by(alarm_id=alarm_id)
query = query.filter_by(entity_instance_id=entity_instance_id)
query = query.join(models.EventSuppression,
models.Alarm.alarm_id ==
models.EventSuppression.alarm_id)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
try:
result = query.one()
except NoResultFound:
return None
return result
def alarm_get_all(self, uuid=None, alarm_id=None, entity_type_id=None,
entity_instance_id=None, severity=None, alarm_type=None,
limit=None, include_suppress=False):
query = model_query(models.Alarm, read_deleted="no")
query = query.order_by(asc(models.Alarm.severity),
asc(models.Alarm.entity_instance_id),
asc(models.Alarm.id))
if uuid is not None:
query = query.filter(models.Alarm.uuid.contains(uuid))
if alarm_id is not None:
query = query.filter(models.Alarm.alarm_id.contains(alarm_id))
if entity_type_id is not None:
query = query.filter(models.Alarm.entity_type_id.contains(
entity_type_id))
if entity_instance_id is not None:
query = query.filter(models.Alarm.entity_instance_id.contains(
entity_instance_id))
if severity is not None:
query = query.filter(models.Alarm.severity.contains(severity))
if alarm_type is not None:
query = query.filter(models.Alarm.alarm_type.contains(alarm_type))
query = add_alarm_filter_by_event_suppression(query, include_suppress)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
if limit is not None:
query = query.limit(limit)
alarm_list = []
try:
alarm_list = query.all()
except UnicodeDecodeError:
LOG.error("UnicodeDecodeError occurred, "
"return an empty alarm list.")
return alarm_list
@objects.objectify(objects.alarm)
def alarm_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None,
include_suppress=False):
query = model_query(models.Alarm)
query = add_alarm_filter_by_event_suppression(query, include_suppress)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
return _paginate_query(models.Alarm, limit, marker,
sort_key, sort_dir, query)
def alarm_update(self, id, values):
with _session_for_write() as session:
query = model_query(models.Alarm, session=session)
query = query.filter_by(id=id)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exceptions.AlarmNotFound(alarm=id)
return query.one()
def alarm_destroy(self, id):
with _session_for_write() as session:
query = model_query(models.Alarm, session=session)
query = query.filter_by(uuid=id)
try:
query.one()
except NoResultFound:
raise exceptions.AlarmNotFound(alarm=id)
query.delete()
def alarm_destroy_by_ids(self, alarm_id, entity_instance_id):
with _session_for_write() as session:
query = model_query(models.Alarm, session=session)
if alarm_id and entity_instance_id:
query = query.filter_by(alarm_id=alarm_id)
query = query.filter_by(entity_instance_id=entity_instance_id)
try:
query.one()
except NoResultFound:
raise exceptions.AlarmNotFound(alarm=alarm_id)
query.delete()
def event_log_create(self, values):
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
event_log = models.EventLog()
event_log.update(values)
count = self.event_log_get_count()
max_log = config.get_max_event_log()
if count >= int(max_log):
self.delete_oldest_event_log()
with _session_for_write() as session:
try:
session.add(event_log)
session.flush()
except db_exc.DBDuplicateEntry:
raise exceptions.EventLogAlreadyExists(id=values['id'])
return event_log
def event_log_get_count(self):
query = model_query(models.EventLog)
return query.count()
def delete_oldest_event_log(self):
result = self.event_log_get_oldest()
self.event_log_delete(result['id'])
def event_log_delete(self, id):
with _session_for_write() as session:
query = model_query(models.EventLog, session=session)
query = query.filter_by(id=id)
try:
query.one()
except NoResultFound:
raise exceptions.EventLogNotFound(eventLog=id)
query.delete()
def event_log_get_oldest(self):
query = model_query(models.EventLog)
result = query.order_by(asc(models.EventLog.created_at)).limit(1).one()
return result
@objects.objectify(objects.event_log)
def event_log_get(self, uuid):
query = model_query(models.EventLog)
if uuid:
query = query.filter_by(uuid=uuid)
query = add_event_log_filter_by_event_suppression(query,
include_suppress=True)
try:
result = query.one()
except NoResultFound:
raise exceptions.EventLogNotFound(eventLog=uuid)
return result
def _addEventTypeToQuery(self, query, evtType="ALL"):
if evtType is None or not (evtType in ["ALL", "ALARM", "LOG"]):
evtType = "ALL"
if evtType == "ALARM":
query = query.filter(or_(models.EventLog.state == "set",
models.EventLog.state == "clear"))
if evtType == "LOG":
query = query.filter(models.EventLog.state == "log")
return query
@objects.objectify(objects.event_log)
def event_log_get_all(self, uuid=None, event_log_id=None,
entity_type_id=None, entity_instance_id=None,
severity=None, event_log_type=None, start=None,
end=None, limit=None, evtType="ALL", include_suppress=False):
query = model_query(models.EventLog, read_deleted="no")
query = query.order_by(desc(models.EventLog.timestamp))
if uuid is not None:
query = query.filter_by(uuid=uuid)
query = self._addEventTypeToQuery(query, evtType)
if event_log_id is not None:
query = query.filter(models.EventLog.event_log_id.contains(
event_log_id))
if entity_type_id is not None:
query = query.filter(models.EventLog.entity_type_id.contains(
entity_type_id))
if entity_instance_id is not None:
query = query.filter(models.EventLog.entity_instance_id.contains(
entity_instance_id))
if severity is not None:
query = query.filter(models.EventLog.severity.contains(severity))
if event_log_type is not None:
query = query.filter_by(event_log_type=event_log_type)
if start is not None:
query = query.filter(models.EventLog.timestamp >= start)
if end is not None:
query = query.filter(models.EventLog.timestamp <= end)
if include_suppress is not None:
query = add_event_log_filter_by_event_suppression(query,
include_suppress)
if limit is not None:
query = query.limit(limit)
hist_list = []
try:
hist_list = query.all()
except UnicodeDecodeError:
LOG.error("UnicodeDecodeError occurred, "
"return an empty event log list.")
return hist_list
@objects.objectify(objects.event_log)
def event_log_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, evtType="ALL",
include_suppress=False):
query = model_query(models.EventLog)
query = self._addEventTypeToQuery(query, evtType)
query = add_event_log_filter_by_event_suppression(query,
include_suppress)
return _paginate_query(models.EventLog, limit, marker,
sort_key, sort_dir, query)
@objects.objectify(objects.event_suppression)
def event_suppression_get(self, id):
query = model_query(models.EventSuppression)
if uuidutils.is_uuid_like(id):
query = query.filter_by(uuid=id)
else:
query = query.filter_by(id=id)
try:
result = query.one()
except NoResultFound:
raise exceptions.InvalidParameterValue(
err="No event suppression entry found for %s" % id)
return result
@objects.objectify(objects.event_suppression)
def event_suppression_get_all(self, uuid=None, alarm_id=None,
description=None, suppression_status=None, limit=None,
sort_key=None, sort_dir=None):
query = model_query(models.EventSuppression, read_deleted="no")
if uuid is not None:
query = query.filter_by(uuid=uuid)
if alarm_id is not None:
query = query.filter_by(alarm_id=alarm_id)
if description is not None:
query = query.filter_by(description=description)
if suppression_status is not None:
query = query.filter_by(suppression_status=suppression_status)
query = query.filter_by(set_for_deletion=False)
return _paginate_query(models.EventSuppression, limit, None,
sort_key, sort_dir, query)
@objects.objectify(objects.event_suppression)
def event_suppression_update(self, uuid, values):
with _session_for_write() as session:
query = model_query(models.EventSuppression, session=session)
query = query.filter_by(uuid=uuid)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exceptions.NotFound(uuid)
return query.one()
| 34.859213 | 93 | 0.632357 | ["Apache-2.0"] | MarioCarrilloA/fault | fm-rest-api/fm/fm/db/sqlalchemy/api.py | 16,837 | Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2015 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
"""
Author: Wang, Cong([email protected])
"""
import unittest
from bigflow import transforms
from bigflow.test import test_base
class TestCase(test_base.PipelineBasedTest):
def test_normal(self):
inp = [1, 9, 6, 2]
fn = lambda x, y: x + y
expect = reduce(fn, inp)
result = self._pipeline.parallelize(inp).reduce(fn).get()
self.assertEqual(expect, result)
def test_side_input(self):
inp = [1, 9, 6, 2]
fn = lambda x, y: x + y
expect = reduce(fn, inp)
self._pipeline.parallelize(1)
result = self._pipeline.parallelize(inp).reduce(fn).get()
self.assertEqual(expect, result)
def test_reduce_with_side_input_ptable(self):
si = self._pipeline.parallelize({1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6})
data = self._pipeline.parallelize([1, 2, 3])
result = data.reduce(lambda x, y, d: d[x] + d[y], si)
result_data = result.get()
self.assertEqual(6, result_data)
def test_modify_left_param(self):
""" inner function"""
inp = self._pipeline.parallelize([[1, 2, 3], [6, 5, 4]])
def _sum(x, y):
x[0] += y[0]
x[1] += y[1]
x[2] += y[2]
return x
result = transforms.union(inp.reduce(_sum), inp.reduce(_sum)).get()
self.assertEqual([[7, 7, 7], [7, 7, 7]], result)
if __name__ == "__main__":
unittest.main()
| 31.069444 | 77 | 0.583818 | ["Apache-2.0"] | aiplat/bigflow | bigflow_python/python/bigflow/transform_impls/test/reduce_test.py | 2,237 | Python
from io import BytesIO
from string import ascii_letters
from zipfile import ZipFile
import pytest
from openpyxl.chart import BarChart
from openpyxl.drawing.spreadsheet_drawing import SpreadsheetDrawing
from openpyxl import Workbook
from openpyxl.worksheet.table import Table
@pytest.fixture
def ExcelWriter():
from ..excel import ExcelWriter
return ExcelWriter
@pytest.fixture
def archive():
out = BytesIO()
return ZipFile(out, "w")
def test_worksheet(ExcelWriter, archive):
wb = Workbook()
ws = wb.active
writer = ExcelWriter(wb, archive)
writer._write_worksheets()
assert ws.path[1:] in archive.namelist()
assert ws.path in writer.manifest.filenames
def test_tables(ExcelWriter, archive):
wb = Workbook()
ws = wb.active
ws.append(list(ascii_letters))
ws._rels = []
t = Table(displayName="Table1", ref="A1:D10")
ws.add_table(t)
writer = ExcelWriter(wb, archive)
writer._write_worksheets()
assert t.path[1:] in archive.namelist()
assert t.path in writer.manifest.filenames
def test_drawing(ExcelWriter, archive):
wb = Workbook()
ws = wb.active
drawing = SpreadsheetDrawing()
writer = ExcelWriter(wb, archive)
writer._write_drawing(drawing)
assert drawing.path == '/xl/drawings/drawing1.xml'
assert drawing.path[1:] in archive.namelist()
assert drawing.path in writer.manifest.filenames
def test_write_chart(ExcelWriter, archive):
wb = Workbook()
ws = wb.active
chart = BarChart()
ws.add_chart(chart)
writer = ExcelWriter(wb, archive)
writer._write_worksheets()
assert 'xl/worksheets/sheet1.xml' in archive.namelist()
assert ws.path in writer.manifest.filenames
rel = ws._rels["rId1"]
assert dict(rel) == {'Id': 'rId1', 'Target': '/xl/drawings/drawing1.xml',
'Type':
'http://schemas.openxmlformats.org/officeDocument/2006/relationships/drawing'}
@pytest.mark.pil_required
def test_write_images(datadir, ExcelWriter, archive):
from openpyxl.drawing.image import Image
datadir.chdir()
writer = ExcelWriter(None, archive)
img = Image("plain.png")
writer._images.append(img)
writer._write_images()
archive.close()
zipinfo = archive.infolist()
assert 'xl/media/image1.png' in archive.namelist()
def test_chartsheet(ExcelWriter, archive):
wb = Workbook()
cs = wb.create_chartsheet()
writer = ExcelWriter(wb, archive)
writer._write_chartsheets()
assert cs.path in writer.manifest.filenames
assert cs.path[1:] in writer._archive.namelist()
def test_comment(ExcelWriter, archive):
from openpyxl.comments import Comment
wb = Workbook()
ws = wb.active
ws['B5'].comment = Comment("A comment", "The Author")
writer = ExcelWriter(None, archive)
writer._write_comment(ws)
assert archive.namelist() == ['xl/comments/comment1.xml', 'xl/drawings/commentsDrawing1.vml']
assert '/xl/comments/comment1.xml' in writer.manifest.filenames
assert ws.legacy_drawing == 'xl/drawings/commentsDrawing1.vml'
def test_merge_vba(ExcelWriter, archive, datadir):
from openpyxl import load_workbook
datadir.chdir()
wb = load_workbook("vba+comments.xlsm", keep_vba=True)
writer = ExcelWriter(wb, archive)
writer._merge_vba()
assert set(archive.namelist()) == set([
'xl/vbaProject.bin',
'xl/drawings/vmlDrawing1.vml',
'xl/ctrlProps/ctrlProp3.xml',
'xl/ctrlProps/ctrlProp1.xml',
'xl/ctrlProps/ctrlProp10.xml',
'xl/ctrlProps/ctrlProp9.xml',
'xl/ctrlProps/ctrlProp4.xml',
'xl/ctrlProps/ctrlProp5.xml',
'xl/ctrlProps/ctrlProp6.xml',
'xl/ctrlProps/ctrlProp7.xml',
'xl/ctrlProps/ctrlProp8.xml',
'xl/ctrlProps/ctrlProp2.xml',
])
| 26.356164 | 103 | 0.685551 | ["MIT"] | sekcheong/openpyxl | openpyxl/writer/tests/test_excel.py | 3,848 | Python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in accounting/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('accounting/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
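# e.g. (illustrative) if accounting/__init__.py contains the line
# __version__ = '0.0.1', the regex above captures "'0.0.1'" and
# ast.literal_eval turns it into the plain string '0.0.1'.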
setup(
name='accounting',
version=version,
description='A test accounting app',
author='gvn',
author_email='accounts.org',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 25.615385 | 65 | 0.72973 | ["MIT"] | gavindsouza/accounting-app | setup.py | 666 | Python
from .ShuntCompensator import ShuntCompensator
class LinearShuntCompensator(ShuntCompensator):
'''
A linear shunt compensator has banks or sections with equal admittance values.
:bPerSection: Positive sequence shunt (charging) susceptance per section Default: 0.0
:gPerSection: Positive sequence shunt (charging) conductance per section Default: 0.0
:b0PerSection: Zero sequence shunt (charging) susceptance per section Default: 0.0
:g0PerSection: Zero sequence shunt (charging) conductance per section Default: 0.0
'''
cgmesProfile = ShuntCompensator.cgmesProfile
possibleProfileList = {'class': [cgmesProfile.EQ.value, cgmesProfile.SSH.value, ],
'bPerSection': [cgmesProfile.EQ.value, ],
'gPerSection': [cgmesProfile.EQ.value, ],
'b0PerSection': [cgmesProfile.EQ.value, ],
'g0PerSection': [cgmesProfile.EQ.value, ],
}
serializationProfile = {}
__doc__ += '\n Documentation of parent class ShuntCompensator: \n' + ShuntCompensator.__doc__
def __init__(self, bPerSection = 0.0, gPerSection = 0.0, b0PerSection = 0.0, g0PerSection = 0.0, *args, **kw_args):
super().__init__(*args, **kw_args)
self.bPerSection = bPerSection
self.gPerSection = gPerSection
self.b0PerSection = b0PerSection
self.g0PerSection = g0PerSection
def __str__(self):
str = 'class=LinearShuntCompensator\n'
attributes = self.__dict__
for key in attributes.keys():
str = str + key + '={}\n'.format(attributes[key])
return str
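# Minimal usage sketch (illustrative values, assuming the parent class provides
# defaults for its own attributes; not part of the CGMES profile definition):
#   comp = LinearShuntCompensator(bPerSection=0.001, gPerSection=0.0)
#   print(comp)  # "class=LinearShuntCompensator" followed by one
#                # "attribute=value" line per attribute, via __str__ above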
| 36 | 117 | 0.73374 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | CIM-IEC/CIMpy | cimpy/cgmes_v2_4_15/LinearShuntCompensator.py | 1,476 | Python
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally.common import validation
from rally import consts
from rally.plugins.openstack.cleanup import manager as resource_manager
from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils
from rally.task import context
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="cluster_templates", platform="openstack", order=470)
class ClusterTemplateGenerator(context.Context):
"""Context class for generating temporary cluster model for benchmarks."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"image_id": {
"type": "string"
},
"flavor_id": {
"type": "string"
},
"master_flavor_id": {
"type": "string"
},
"external_network_id": {
"type": "string"
},
"fixed_network": {
"type": "string"
},
"fixed_subnet": {
"type": "string"
},
"dns_nameserver": {
"type": "string"
},
"docker_volume_size": {
"type": "integer"
},
"labels": {
"type": "string"
},
"coe": {
"type": "string"
},
"http_proxy": {
"type": "string"
},
"https_proxy": {
"type": "string"
},
"no_proxy": {
"type": "string"
},
"network_driver": {
"type": "string"
},
"tls_disabled": {
"type": "boolean"
},
"public": {
"type": "boolean"
},
"registry_enabled": {
"type": "boolean"
},
"volume_driver": {
"type": "string"
},
"server_type": {
"type": "string"
},
"docker_storage_driver": {
"type": "string"
},
"master_lb_enabled": {
"type": "boolean"
}
},
"required": ["image_id", "external_network_id", "coe"],
"additionalProperties": False
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `ClusterTemplate`"))
def setup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
magnum_scenario = magnum_utils.MagnumScenario({
"user": user,
"task": self.context["task"],
"owner_id": self.context["owner_id"],
"config": {"api_versions": self.context["config"].get(
"api_versions", [])}
})
cluster_template = magnum_scenario._create_cluster_template(
**self.config)
ct_uuid = cluster_template.uuid
self.context["tenants"][tenant_id]["cluster_template"] = ct_uuid
@logging.log_task_wrapper(LOG.info, _("Exit context: `ClusterTemplate`"))
def cleanup(self):
resource_manager.cleanup(
names=["magnum.cluster_templates"],
users=self.context.get("users", []),
superclass=magnum_utils.MagnumScenario,
task_id=self.get_owner_id())
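# Illustrative context config satisfying the required schema fields above
# (values are placeholders, not real OpenStack IDs):
#   {"cluster_templates": {"image_id": "fedora-atomic",
#                          "external_network_id": "public",
#                          "coe": "kubernetes",
#                          "flavor_id": "m1.small"}}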
| 31.870229 | 78 | 0.523593 | ["Apache-2.0"] | boris-42/rally | rally/plugins/openstack/context/magnum/cluster_templates.py | 4,175 | Python
##########################################################################
# Author: Samuca
#
# brief: returns the int part of number
#
# this is an exercise from a YouTube playlist:
# https://www.youtube.com/playlist?list=PLHz_AreHm4dm6wYOIW20Nyg12TAjmMGT-
##########################################################################
number = float(input("Enter any number: "))
print("The integer part of {} is {}".format(number, int(number)))
# we can also get the integer part with the trunc method from the math module
from math import trunc
n = float(input("Enter another number: "))
print("The integer part of {} is {}".format(n, trunc(n)))
| 36.705882 | 76 | 0.540064 | ["MIT"] | Samuca47prog/Python_exercises_CursoEmVideo | ex016.py | 624 | Python
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
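# e.g. (illustrative) "--select_data MJ-ST --batch_ratio 0.5-0.5" becomes
# ['MJ', 'ST'] and ['0.5', '0.5']: half of each training batch is sampled from
# the MJ subset and half from the ST subset by Batch_Balanced_Dataset below.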
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
""" setup loss """
if 'CTC' in opt.Prediction:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
# loss averager
loss_avg = Averager()
# keep only the parameters that require gradient descent updates
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
if opt.adam:
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
else:
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
start_time = time.time()
best_accuracy = -1
best_norm_ED = -1
iteration = start_iter
while True:
# train part
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
if 'CTC' in opt.Prediction:
preds = model(image, text)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
preds = preds.log_softmax(2).permute(1, 0, 2)
cost = criterion(preds, text, preds_size, length)
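# note (added for clarity): torch.nn.CTCLoss expects log-probabilities of
# shape (T, N, C), hence the log_softmax + permute above; preds_size repeats
# the fixed output length T once per sample in the batch.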
else:
preds = model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
model.zero_grad()
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)
optimizer.step()
loss_avg.add(cost)
# validation part
if (iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'
elapsed_time = time.time() - start_time
# for log
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
model.eval()
with torch.no_grad():
valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
model, criterion, valid_loader, converter, opt)
model.train()
# training loss and validation loss
loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
loss_avg.reset()
current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
# keep best accuracy model (on valid dataset)
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
if current_norm_ED > best_norm_ED:
best_norm_ED = current_norm_ED
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
print(loss_model_log)
log.write(loss_model_log + '\n')
# show some predicted results
dashed_line = '-' * 80
head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
predicted_result_log += f'{dashed_line}'
print(predicted_result_log)
log.write(predicted_result_log + '\n')
# save model per 1e+5 iter.
if (iteration + 1) % 1e+5 == 0:
torch.save(
model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) == opt.num_iter:
print('end the training')
sys.exit()
iteration += 1
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = "2,3"
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', default="/path/to/your/lmdb/train", help='path to training dataset')
parser.add_argument('--valid_data', default="/path/to/your/lmdb/val", help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')
parser.add_argument('--batch_size', default=64, type=int, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=500, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
""" Data processing """
parser.add_argument('--select_data', type=str, default='/',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='1',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, default="TPS", help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, default="ResNet", help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, default="BiLSTM", help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--Prediction', type=str, default="Attn", help='Prediction stage. CTC|Attn')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
        print('if you get stuck for a long time with the multi-GPU setting, try setting --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
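        # e.g. with the defaults (--workers 4, --batch_size 64) and 2 GPUs this
        # becomes workers=8 and batch_size=128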
""" previous version
    print('To equalize batch statistics with the 1-GPU setting, batch_size is multiplied by num_gpu; the multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
    If you don't care about it, just comment out these lines.
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
| 47.62623 | 160 | 0.645257 | [
"MIT"
] | unanan/deep-text-recognition-benchmark-mnn-ncnn | train.py | 14,526 | Python |
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name
"""
Quantum Tomography Module
Description:
This module contains functions for performing quantum state and quantum
process tomography. This includes:
- Functions for generating a set of circuits to
extract tomographically complete sets of measurement data.
- Functions for generating a tomography data set from the
results after the circuits have been executed on a backend.
- Functions for reconstructing a quantum state, or quantum process
(Choi-matrix) from tomography data sets.
Reconstruction Methods:
Currently implemented reconstruction methods are
- Linear inversion by weighted least-squares fitting.
- Fast maximum likelihood reconstruction using ref [1].
References:
[1] J Smolin, JM Gambetta, G Smith, Phys. Rev. Lett. 108, 070502 (2012).
Open access: arXiv:1106.5458 [quant-ph].
Workflow:
The basic functions for performing state and tomography experiments are:
- `tomography_set`, `state_tomography_set`, and `process_tomography_set`
all generates data structures for tomography experiments.
- `create_tomography_circuits` generates the quantum circuits specified
in a `tomography_set` for performing state tomography of the output
- `tomography_data` extracts the results after executing the tomography
circuits and returns it in a data structure used by fitters for state
reconstruction.
- `fit_tomography_data` reconstructs a density matrix or Choi-matrix from
      a set of tomography data.
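Example:
    A minimal state-tomography sketch for a single qubit prepared by a circuit
    `circ` with registers `qreg` and `creg` (the circuit and the backend
    execution step are assumed to be set up elsewhere):
    ```
    tomo_set = state_tomography_set([0])
    tomo_circuits = create_tomography_circuits(circ, qreg, creg, tomo_set)
    # ... execute tomo_circuits on a backend to obtain `results` ...
    data = tomography_data(results, circ.name, tomo_set)
    rho = fit_tomography_data(data)
    ```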
"""
import logging
from functools import reduce
from itertools import product
from re import match
import numpy as np
from qiskit import QuantumCircuit
from qiskit import QiskitError
from qiskit.tools.qi.qi import vectorize, devectorize, outer
logger = logging.getLogger(__name__)
###############################################################################
# Tomography Bases
###############################################################################
class TomographyBasis(dict):
"""
    Dictionary subclass that includes methods for adding gates to circuits.
A TomographyBasis is a dictionary where the keys index a measurement
and the values are a list of projectors associated to that measurement.
It also includes two optional methods `prep_gate` and `meas_gate`:
- `prep_gate` adds gates to a circuit to prepare the corresponding
basis projector from an initial ground state.
- `meas_gate` adds gates to a circuit to transform the default
Z-measurement into a measurement in the basis.
    With the exception of built-in bases, these functions do nothing unless
they are specified by the user. They may be set by the data members
`prep_fun` and `meas_fun`. We illustrate this with an example.
Example:
A measurement in the Pauli-X basis has two outcomes corresponding to
the projectors:
`Xp = [[0.5, 0.5], [0.5, 0.5]]`
`Xm = [[0.5, -0.5], [-0.5, 0.5]]`
We can express this as a basis by
`BX = TomographyBasis( {'X': [Xp, Xm]} )`
    To specify the gates used to prepare and measure in this basis we define:
```
def BX_prep_fun(circuit, qreg, op):
bas, proj = op
if bas == "X":
if proj == 0:
circuit.u2(0., np.pi, qreg) # apply H
else: # proj == 1
circuit.u2(np.pi, np.pi, qreg) # apply H.X
        def BX_meas_fun(circuit, qreg, op):
if op == "X":
circuit.u2(0., np.pi, qreg) # apply H
```
We can then attach these functions to the basis using:
`BX.prep_fun = BX_prep_fun`
`BX.meas_fun = BX_meas_fun`.
Generating function:
A generating function `tomography_basis` exists to create bases in a
single step. Using the above example this can be done by:
```
BX = tomography_basis({'X': [Xp, Xm]},
prep_fun=BX_prep_fun,
meas_fun=BX_meas_fun)
```
"""
prep_fun = None
meas_fun = None
def prep_gate(self, circuit, qreg, op):
"""
Add state preparation gates to a circuit.
Args:
circuit (QuantumCircuit): circuit to add a preparation to.
qreg (tuple(QuantumRegister,int)): quantum register to apply
preparation to.
op (tuple(str, int)): the basis label and index for the
preparation op.
"""
if self.prep_fun is None:
pass
else:
self.prep_fun(circuit, qreg, op)
def meas_gate(self, circuit, qreg, op):
"""
Add measurement gates to a circuit.
Args:
circuit (QuantumCircuit): circuit to add measurement to.
qreg (tuple(QuantumRegister,int)): quantum register being measured.
op (str): the basis label for the measurement.
"""
if self.meas_fun is None:
pass
else:
self.meas_fun(circuit, qreg, op)
def tomography_basis(basis, prep_fun=None, meas_fun=None):
"""
Generate a TomographyBasis object.
    See TomographyBasis for further details.
    Args:
        basis (dict): a dict mapping basis labels to lists of projector
            matrices (see TomographyBasis).
        prep_fun (callable) optional: the function which adds preparation
            gates to a circuit.
meas_fun (callable) optional: the function which adds measurement
gates to a circuit.
Returns:
TomographyBasis: A tomography basis.
"""
ret = TomographyBasis(basis)
ret.prep_fun = prep_fun
ret.meas_fun = meas_fun
return ret
# PAULI BASIS
# This corresponds to measurements in the X, Y, Z basis where
# Outcomes 0,1 are the +1,-1 eigenstates respectively.
# State preparation is also done in the +1 and -1 eigenstates.
def __pauli_prep_gates(circuit, qreg, op):
"""
Add state preparation gates to a circuit.
"""
bas, proj = op
if bas not in ['X', 'Y', 'Z']:
raise QiskitError("There's no X, Y or Z basis for this Pauli "
"preparation")
if bas == "X":
if proj == 1:
circuit.u2(np.pi, np.pi, qreg) # H.X
else:
circuit.u2(0., np.pi, qreg) # H
elif bas == "Y":
if proj == 1:
circuit.u2(-0.5 * np.pi, np.pi, qreg) # S.H.X
else:
circuit.u2(0.5 * np.pi, np.pi, qreg) # S.H
elif bas == "Z" and proj == 1:
circuit.u3(np.pi, 0., np.pi, qreg) # X
def __pauli_meas_gates(circuit, qreg, op):
"""
Add state measurement gates to a circuit.
"""
if op not in ['X', 'Y', 'Z']:
raise QiskitError("There's no X, Y or Z basis for this Pauli "
"measurement")
if op == "X":
circuit.u2(0., np.pi, qreg) # H
elif op == "Y":
circuit.u2(0., 0.5 * np.pi, qreg) # H.S^*
__PAULI_BASIS_OPS = {
'X':
[np.array([[0.5, 0.5], [0.5, 0.5]]),
np.array([[0.5, -0.5], [-0.5, 0.5]])],
'Y': [
np.array([[0.5, -0.5j], [0.5j, 0.5]]),
np.array([[0.5, 0.5j], [-0.5j, 0.5]])
],
'Z': [np.array([[1, 0], [0, 0]]),
np.array([[0, 0], [0, 1]])]
}
# Create the actual basis
PAULI_BASIS = tomography_basis(
__PAULI_BASIS_OPS,
prep_fun=__pauli_prep_gates,
meas_fun=__pauli_meas_gates)
# SIC-POVM BASIS
def __sic_prep_gates(circuit, qreg, op):
"""
Add state preparation gates to a circuit.
"""
bas, proj = op
if bas != 'S':
raise QiskitError('Not in SIC basis!')
theta = -2 * np.arctan(np.sqrt(2))
if proj == 1:
circuit.u3(theta, np.pi, 0.0, qreg)
elif proj == 2:
circuit.u3(theta, np.pi / 3, 0.0, qreg)
elif proj == 3:
circuit.u3(theta, -np.pi / 3, 0.0, qreg)
__SIC_BASIS_OPS = {
'S': [
np.array([[1, 0], [0, 0]]),
np.array([[1, np.sqrt(2)], [np.sqrt(2), 2]]) / 3,
np.array([[1, np.exp(np.pi * 2j / 3) * np.sqrt(2)],
[np.exp(-np.pi * 2j / 3) * np.sqrt(2), 2]]) / 3,
np.array([[1, np.exp(-np.pi * 2j / 3) * np.sqrt(2)],
[np.exp(np.pi * 2j / 3) * np.sqrt(2), 2]]) / 3
]
}
SIC_BASIS = tomography_basis(__SIC_BASIS_OPS, prep_fun=__sic_prep_gates)
###############################################################################
# Tomography Set and labels
###############################################################################
def tomography_set(meas_qubits,
meas_basis='Pauli',
prep_qubits=None,
prep_basis=None):
"""
Generate a dictionary of tomography experiment configurations.
This returns a data structure that is used by other tomography functions
to generate state and process tomography circuits, and extract tomography
data from results after execution on a backend.
Quantum State Tomography:
        By default it will return a set for performing Quantum State
Tomography where individual qubits are measured in the Pauli basis.
A custom measurement basis may also be used by defining a user
`tomography_basis` and passing this in for the `meas_basis` argument.
Quantum Process Tomography:
A quantum process tomography set is created by specifying a preparation
basis along with a measurement basis. The preparation basis may be a
user defined `tomography_basis`, or one of the two built in basis 'SIC'
or 'Pauli'.
- SIC: Is a minimal symmetric informationally complete preparation
basis for 4 states for each qubit (4 ^ number of qubits total
preparation states). These correspond to the |0> state and the 3
other vertices of a tetrahedron on the Bloch-sphere.
- Pauli: Is a tomographically overcomplete preparation basis of the six
eigenstates of the 3 Pauli operators (6 ^ number of qubits
total preparation states).
Args:
meas_qubits (list): The qubits being measured.
meas_basis (tomography_basis or str): The qubit measurement basis.
The default value is 'Pauli'.
prep_qubits (list or None): The qubits being prepared. If None then
meas_qubits will be used for process tomography experiments.
prep_basis (tomography_basis or None): The optional qubit preparation
basis. If no basis is specified state tomography will be performed
instead of process tomography. A built in basis may be specified by
'SIC' or 'Pauli' (SIC basis recommended for > 2 qubits).
Returns:
dict: A dict of tomography configurations that can be parsed by
`create_tomography_circuits` and `tomography_data` functions
for implementing quantum tomography experiments. This output contains
fields "qubits", "meas_basis", "circuits". It may also optionally
contain a field "prep_basis" for process tomography experiments.
```
{
'qubits': qubits (list[ints]),
'meas_basis': meas_basis (tomography_basis),
'circuit_labels': (list[string]),
'circuits': (list[dict]) # prep and meas configurations
# optionally for process tomography experiments:
'prep_basis': prep_basis (tomography_basis)
}
```
Raises:
QiskitError: if the Qubits argument is not a list.
"""
if not isinstance(meas_qubits, list):
raise QiskitError('Qubits argument must be a list')
num_of_qubits = len(meas_qubits)
if prep_qubits is None:
prep_qubits = meas_qubits
if not isinstance(prep_qubits, list):
raise QiskitError('prep_qubits argument must be a list')
if len(prep_qubits) != len(meas_qubits):
        raise QiskitError('meas_qubits and prep_qubits are of different lengths')
if isinstance(meas_basis, str):
if meas_basis.lower() == 'pauli':
meas_basis = PAULI_BASIS
if isinstance(prep_basis, str):
if prep_basis.lower() == 'pauli':
prep_basis = PAULI_BASIS
elif prep_basis.lower() == 'sic':
prep_basis = SIC_BASIS
circuits = []
circuit_labels = []
# add meas basis configs
if prep_basis is None:
# State Tomography
for meas_product in product(meas_basis.keys(), repeat=num_of_qubits):
meas = dict(zip(meas_qubits, meas_product))
circuits.append({'meas': meas})
# Make label
label = '_meas_'
for qubit, op in meas.items():
label += '%s(%d)' % (op[0], qubit)
circuit_labels.append(label)
return {'qubits': meas_qubits,
'circuits': circuits,
'circuit_labels': circuit_labels,
'meas_basis': meas_basis}
# Process Tomography
num_of_s = len(list(prep_basis.values())[0])
plst_single = [(b, s)
for b in prep_basis.keys()
for s in range(num_of_s)]
for plst_product in product(plst_single, repeat=num_of_qubits):
for meas_product in product(meas_basis.keys(),
repeat=num_of_qubits):
prep = dict(zip(prep_qubits, plst_product))
meas = dict(zip(meas_qubits, meas_product))
circuits.append({'prep': prep, 'meas': meas})
# Make label
label = '_prep_'
for qubit, op in prep.items():
label += '%s%d(%d)' % (op[0], op[1], qubit)
label += '_meas_'
for qubit, op in meas.items():
label += '%s(%d)' % (op[0], qubit)
circuit_labels.append(label)
return {'qubits': meas_qubits,
'circuits': circuits,
'circuit_labels': circuit_labels,
'prep_basis': prep_basis,
'meas_basis': meas_basis}
def state_tomography_set(qubits, meas_basis='Pauli'):
"""
Generate a dictionary of state tomography experiment configurations.
This returns a data structure that is used by other tomography functions
to generate state and process tomography circuits, and extract tomography
data from results after execution on a backend.
Quantum State Tomography:
        By default it will return a set for performing Quantum State
Tomography where individual qubits are measured in the Pauli basis.
A custom measurement basis may also be used by defining a user
`tomography_basis` and passing this in for the `meas_basis` argument.
Quantum Process Tomography:
A quantum process tomography set is created by specifying a preparation
basis along with a measurement basis. The preparation basis may be a
user defined `tomography_basis`, or one of the two built in basis 'SIC'
or 'Pauli'.
- SIC: Is a minimal symmetric informationally complete preparation
basis for 4 states for each qubit (4 ^ number of qubits total
preparation states). These correspond to the |0> state and the 3
other vertices of a tetrahedron on the Bloch-sphere.
- Pauli: Is a tomographically overcomplete preparation basis of the six
eigenstates of the 3 Pauli operators (6 ^ number of qubits
total preparation states).
Args:
qubits (list): The qubits being measured.
meas_basis (tomography_basis or str): The qubit measurement basis.
The default value is 'Pauli'.
Returns:
dict: A dict of tomography configurations that can be parsed by
`create_tomography_circuits` and `tomography_data` functions
for implementing quantum tomography experiments. This output contains
fields "qubits", "meas_basis", "circuits".
```
{
'qubits': qubits (list[ints]),
'meas_basis': meas_basis (tomography_basis),
'circuit_labels': (list[string]),
'circuits': (list[dict]) # prep and meas configurations
}
```
"""
return tomography_set(qubits, meas_basis=meas_basis)
def process_tomography_set(meas_qubits, meas_basis='Pauli',
prep_qubits=None, prep_basis='SIC'):
"""
Generate a dictionary of process tomography experiment configurations.
This returns a data structure that is used by other tomography functions
to generate state and process tomography circuits, and extract tomography
data from results after execution on a backend.
A quantum process tomography set is created by specifying a preparation
basis along with a measurement basis. The preparation basis may be a
user defined `tomography_basis`, or one of the two built in basis 'SIC'
or 'Pauli'.
- SIC: Is a minimal symmetric informationally complete preparation
basis for 4 states for each qubit (4 ^ number of qubits total
preparation states). These correspond to the |0> state and the 3
other vertices of a tetrahedron on the Bloch-sphere.
- Pauli: Is a tomographically overcomplete preparation basis of the six
eigenstates of the 3 Pauli operators (6 ^ number of qubits
total preparation states).
Args:
meas_qubits (list): The qubits being measured.
meas_basis (tomography_basis or str): The qubit measurement basis.
The default value is 'Pauli'.
prep_qubits (list or None): The qubits being prepared. If None then
meas_qubits will be used for process tomography experiments.
prep_basis (tomography_basis or str): The qubit preparation basis.
The default value is 'SIC'.
Returns:
dict: A dict of tomography configurations that can be parsed by
`create_tomography_circuits` and `tomography_data` functions
for implementing quantum tomography experiments. This output contains
fields "qubits", "meas_basis", "prep_basus", circuits".
```
{
'qubits': qubits (list[ints]),
'meas_basis': meas_basis (tomography_basis),
'prep_basis': prep_basis (tomography_basis),
'circuit_labels': (list[string]),
'circuits': (list[dict]) # prep and meas configurations
}
```
"""
return tomography_set(meas_qubits, meas_basis=meas_basis,
prep_qubits=prep_qubits, prep_basis=prep_basis)
def tomography_circuit_names(tomo_set, name=''):
"""
Return a list of tomography circuit names.
The returned list is the same as the one returned by
`create_tomography_circuits` and can be used by a QuantumProgram
to execute tomography circuits and extract measurement results.
Args:
tomo_set (tomography_set): a tomography set generated by
`tomography_set`.
name (str): the name of the base QuantumCircuit used by the
tomography experiment.
Returns:
list: A list of circuit names.
"""
return [name + l for l in tomo_set['circuit_labels']]
###############################################################################
# Tomography circuit generation
###############################################################################
def create_tomography_circuits(circuit, qreg, creg, tomoset):
"""
Add tomography measurement circuits to a QuantumProgram.
The quantum program must contain a circuit 'name', which is treated as a
    state preparation circuit for state tomography, or as the circuit being
measured for process tomography. This function then appends the circuit
with a set of measurements specified by the input `tomography_set`,
optionally it also prepends the circuit with state preparation circuits if
they are specified in the `tomography_set`.
For n-qubit tomography with a tomographically complete set of preparations
and measurements this results in $4^n 3^n$ circuits being added to the
quantum program.
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
tomoset (tomography_set): the dict of tomography configurations.
Returns:
list: A list of quantum tomography circuits for the input circuit.
Raises:
QiskitError: if circuit is not a valid QuantumCircuit
Example:
For a tomography set specifying state tomography of qubit-0 prepared
by a circuit 'circ' this would return:
```
['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']
```
For process tomography of the same circuit with preparation in the
SIC-POVM basis it would return:
```
[
'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',
'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',
'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',
'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',
'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',
'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'
]
```
"""
if not isinstance(circuit, QuantumCircuit):
raise QiskitError('Input circuit must be a QuantumCircuit object')
dics = tomoset['circuits']
labels = tomography_circuit_names(tomoset, circuit.name)
tomography_circuits = []
for label, conf in zip(labels, dics):
tmp = circuit
# Add prep circuits
if 'prep' in conf:
prep = QuantumCircuit(qreg, creg, name='tmp_prep')
for qubit, op in conf['prep'].items():
tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op)
prep.barrier(qreg[qubit]) # pylint: disable=no-member
tmp = prep + tmp
# Add measurement circuits
meas = QuantumCircuit(qreg, creg, name='tmp_meas')
for qubit, op in conf['meas'].items():
meas.barrier(qreg[qubit]) # pylint: disable=no-member
tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op)
meas.measure(qreg[qubit], creg[qubit])
tmp = tmp + meas
# Add label to the circuit
tmp.name = label
tomography_circuits.append(tmp)
logger.info('>> created tomography circuits for "%s"', circuit.name)
return tomography_circuits
###############################################################################
# Get results data
###############################################################################
def tomography_data(results, name, tomoset):
"""
Return a results dict for a state or process tomography experiment.
Args:
results (Result): Results from execution of a process tomography
circuits on a backend.
name (string): The name of the circuit being reconstructed.
tomoset (tomography_set): the dict of tomography configurations.
Returns:
list: A list of dicts for the outcome of each process tomography
measurement circuit.
"""
labels = tomography_circuit_names(tomoset, name)
circuits = tomoset['circuits']
data = []
prep = None
for j, _ in enumerate(labels):
counts = marginal_counts(results.get_counts(labels[j]),
tomoset['qubits'])
shots = sum(counts.values())
meas = circuits[j]['meas']
prep = circuits[j].get('prep', None)
meas_qubits = sorted(meas.keys())
if prep:
prep_qubits = sorted(prep.keys())
circuit = {}
for c in counts.keys():
circuit[c] = {}
circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[-1 - k]))
for k in range(len(meas_qubits))]
if prep:
circuit[c]['prep'] = [prep[prep_qubits[k]]
for k in range(len(prep_qubits))]
data.append({'counts': counts, 'shots': shots, 'circuit': circuit})
ret = {'data': data, 'meas_basis': tomoset['meas_basis']}
if prep:
ret['prep_basis'] = tomoset['prep_basis']
return ret
def marginal_counts(counts, meas_qubits):
"""
Compute the marginal counts for a subset of measured qubits.
Args:
counts (dict): the counts returned from a backend ({str: int}).
meas_qubits (list[int]): the qubits to return the marginal
counts distribution for.
Returns:
        dict: A counts dict for the meas_qubits.
    Example: if `counts = {'00': 10, '01': 5}`
        `marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.
        `marginal_counts(counts, [1])` returns `{'0': 15, '1': 0}`.
"""
# pylint: disable=cell-var-from-loop
# Extract total number of qubits from count keys
num_of_qubits = len(list(counts.keys())[0])
# keys for measured qubits only
qs = sorted(meas_qubits, reverse=True)
meas_keys = count_keys(len(qs))
# get regex match strings for summing outcomes of other qubits
rgx = [
reduce(lambda x, y: (key[qs.index(y)] if y in qs else '\\d') + x,
range(num_of_qubits), '') for key in meas_keys
]
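    # e.g. for 3-qubit counts and meas_qubits=[0] the patterns are
    # [r'\d\d0', r'\d\d1'], i.e. the two unmeasured qubits are summed over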
# build the return list
meas_counts = []
for m in rgx:
c = 0
for key, val in counts.items():
if match(m, key):
c += val
meas_counts.append(c)
# return as counts dict on measured qubits only
return dict(zip(meas_keys, meas_counts))
def count_keys(n):
"""Generate outcome bitstrings for n-qubits.
Args:
n (int): the number of qubits.
Returns:
list: A list of bitstrings ordered as follows:
Example: n=2 returns ['00', '01', '10', '11'].
"""
return [bin(j)[2:].zfill(n) for j in range(2**n)]
###############################################################################
# Tomographic Reconstruction functions.
###############################################################################
def fit_tomography_data(tomo_data, method='wizard', options=None):
"""
Reconstruct a density matrix or process-matrix from tomography data.
If the input data is state_tomography_data the returned operator will
be a density matrix. If the input data is process_tomography_data the
returned operator will be a Choi-matrix in the column-vectorization
convention.
Args:
tomo_data (dict): process tomography measurement data.
method (str): the fitting method to use.
Available methods:
- 'wizard' (default)
- 'leastsq'
options (dict or None): additional options for fitting method.
Returns:
numpy.array: The fitted operator.
Available methods:
- 'wizard' (Default): The returned operator will be constrained to be
positive-semidefinite.
Options:
- 'trace': the trace of the returned operator.
The default value is 1.
- 'beta': hedging parameter for computing frequencies from
zero-count data. The default value is 0.50922.
            - 'epsilon': threshold for truncating small eigenvalues to zero.
                The default value is 0.
- 'leastsq': Fitting without positive-semidefinite constraint.
Options:
- 'trace': Same as for 'wizard' method.
- 'beta': Same as for 'wizard' method.
Raises:
Exception: if the `method` parameter is not valid.
"""
if isinstance(method, str) and method.lower() in ['wizard', 'leastsq']:
# get options
trace = __get_option('trace', options)
beta = __get_option('beta', options)
# fit state
rho = __leastsq_fit(tomo_data, trace=trace, beta=beta)
if method == 'wizard':
# Use wizard method to constrain positivity
epsilon = __get_option('epsilon', options)
rho = __wizard(rho, epsilon=epsilon)
return rho
else:
raise Exception('Invalid reconstruction method "%s"' % method)
def __get_option(opt, options):
"""
Return an optional value or None if not found.
"""
if options is not None:
if opt in options:
return options[opt]
return None
###############################################################################
# Fit Method: Linear Inversion
###############################################################################
def __leastsq_fit(tomo_data, weights=None, trace=None, beta=None):
"""
Reconstruct a state from unconstrained least-squares fitting.
Args:
tomo_data (list[dict]): state or process tomography data.
weights (list or array or None): weights to use for least squares
fitting. The default is standard deviation from a binomial
distribution.
trace (float or None): trace of returned operator. The default is 1.
beta (float or None): hedge parameter (>=0) for computing frequencies
from zero-count data. The default value is 0.50922.
Returns:
numpy.array: A numpy array of the reconstructed operator.
"""
if trace is None:
trace = 1. # default to unit trace
data = tomo_data['data']
keys = data[0]['circuit'].keys()
# Get counts and shots
counts = []
shots = []
ops = []
for dat in data:
for key in keys:
counts.append(dat['counts'][key])
shots.append(dat['shots'])
projectors = dat['circuit'][key]
op = __projector(projectors['meas'], tomo_data['meas_basis'])
if 'prep' in projectors:
op_prep = __projector(projectors['prep'],
tomo_data['prep_basis'])
op = np.kron(op_prep.conj(), op)
ops.append(op)
# Convert counts to frequencies
counts = np.array(counts)
shots = np.array(shots)
freqs = counts / shots
# Use hedged frequencies to calculate least squares fitting weights
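    # (each weight is the inverse of the binomial standard deviation of its
    #  hedged frequency estimate, i.e. sqrt(shots / (p * (1 - p))))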
if weights is None:
if beta is None:
beta = 0.50922
K = len(keys)
freqs_hedged = (counts + beta) / (shots + K * beta)
weights = np.sqrt(shots / (freqs_hedged * (1 - freqs_hedged)))
return __tomo_linear_inv(freqs, ops, weights, trace=trace)
def __projector(op_list, basis):
"""Returns a projectors.
"""
ret = 1
    # op_list is ordered from qubit 0 upward; qubit 0 becomes the
    # least-significant (rightmost) tensor factor
for op in op_list:
label, eigenstate = op
ret = np.kron(basis[label][eigenstate], ret)
return ret
def __tomo_linear_inv(freqs, ops, weights=None, trace=None):
"""
Reconstruct a matrix through linear inversion.
Args:
        freqs (list[float]): list of observed frequencies.
ops (list[np.array]): list of corresponding projectors.
weights (list[float] or array_like):
weights to be used for weighted fitting.
trace (float or None): trace of returned operator.
Returns:
numpy.array: A numpy array of the reconstructed operator.
"""
# get weights matrix
if weights is not None:
W = np.array(weights)
if W.ndim == 1:
W = np.diag(W)
# Get basis S matrix
S = np.array([vectorize(m).conj()
for m in ops]).reshape(len(ops), ops[0].size)
if weights is not None:
S = np.dot(W, S) # W.S
# get frequencies vec
v = np.array(freqs) # |f>
if weights is not None:
v = np.dot(W, freqs) # W.|f>
Sdg = S.T.conj() # S^*.W^*
inv = np.linalg.pinv(np.dot(Sdg, S)) # (S^*.W^*.W.S)^-1
# linear inversion of freqs
ret = devectorize(np.dot(inv, np.dot(Sdg, v)))
# renormalize to input trace value
if trace is not None:
ret = trace * ret / np.trace(ret)
return ret
###############################################################################
# Fit Method: Wizard
###############################################################################
def __wizard(rho, epsilon=None):
"""
Returns the nearest positive semidefinite operator to an operator.
This method is based on reference [1]. It constrains positivity
by setting negative eigenvalues to zero and rescaling the positive
eigenvalues.
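    For example (illustrative numbers): eigenvalues [-0.1, 0.2, 0.9] become
    [0.0, 0.15, 0.85], with the negative weight redistributed equally over the
    remaining eigenvalues.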
Args:
rho (array_like): the input operator.
epsilon(float or None): threshold (>=0) for truncating small
eigenvalues values to zero.
Returns:
numpy.array: A positive semidefinite numpy array.
"""
if epsilon is None:
epsilon = 0. # default value
dim = len(rho)
rho_wizard = np.zeros([dim, dim])
    v, w = np.linalg.eigh(rho)  # v: eigenvalues in ascending order, w: eigenvectors
for j in range(dim):
if v[j] < epsilon:
tmp = v[j]
v[j] = 0.
# redistribute loop
x = 0.
for k in range(j + 1, dim):
x += tmp / (dim - (j + 1))
v[k] = v[k] + tmp / (dim - (j + 1))
for j in range(dim):
rho_wizard = rho_wizard + v[j] * outer(w[:, j])
return rho_wizard
###############################################################
# Wigner function tomography
###############################################################
def build_wigner_circuits(circuit, phis, thetas, qubits,
qreg, creg):
"""Create the circuits to rotate to points in phase space
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
        phis (np.matrix[[complex]]): phi rotation angles, indexed by
            [qubit][point] in phase space.
        thetas (np.matrix[[complex]]): theta rotation angles, indexed by
            [qubit][point] in phase space.
qubits (list[int]): a list of the qubit indexes of qreg to be measured.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
Returns:
list: A list of names of the added wigner function circuits.
Raises:
QiskitError: if circuit is not a valid QuantumCircuit.
"""
if not isinstance(circuit, QuantumCircuit):
raise QiskitError('Input circuit must be a QuantumCircuit object')
tomography_circuits = []
points = len(phis[0])
for point in range(points):
label = '_wigner_phase_point'
label += str(point)
tmp_circ = QuantumCircuit(qreg, creg, name=label)
for qubit, _ in enumerate(qubits):
tmp_circ.u3(thetas[qubit][point], 0, # pylint: disable=no-member
phis[qubit][point], qreg[qubits[qubit]])
tmp_circ.measure(qreg[qubits[qubit]], creg[qubits[qubit]])
# Add to original circuit
tmp_circ = circuit + tmp_circ
tmp_circ.name = circuit.name + label
tomography_circuits.append(tmp_circ)
logger.info('>> Created Wigner function circuits for "%s"', circuit.name)
return tomography_circuits
def wigner_data(q_result, meas_qubits, labels, shots=None):
"""Get the value of the Wigner function from measurement results.
Args:
q_result (Result): Results from execution of a state tomography
circuits on a backend.
meas_qubits (list[int]): a list of the qubit indexes measured.
labels (list[str]): a list of names of the circuits
shots (int): number of shots
Returns:
list: The values of the Wigner function at measured points in
phase space
"""
num = len(meas_qubits)
dim = 2**num
p = [0.5 + 0.5 * np.sqrt(3), 0.5 - 0.5 * np.sqrt(3)]
parity = 1
for i in range(num):
parity = np.kron(parity, p)
w = [0] * len(labels)
wpt = 0
counts = [marginal_counts(q_result.get_counts(circ), meas_qubits)
for circ in labels]
for entry in counts:
x = [0] * dim
for i in range(dim):
if bin(i)[2:].zfill(num) in entry:
x[i] = float(entry[bin(i)[2:].zfill(num)])
if shots is None:
shots = np.sum(x)
for i in range(dim):
w[wpt] = w[wpt] + (x[i] / shots) * parity[i]
wpt += 1
return w
| 36.681863 | 79 | 0.591025 | [
"Apache-2.0"
] | filemaster/qiskit-terra | qiskit/tools/qcvv/tomography.py | 37,012 | Python |
"""Test Pyvista camera parameters."""
import os
import numpy as np
import pyvista as pv
import nibabel as nb
FILE = "/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-04/flattening/sub-04_ses-T2s_segm_rim_CS_LH_v02_borderized_multilaterate_perimeter_chunk_T2star_flat_400x400_voronoi.nii.gz"
OUTDIR = "/home/faruk/data2/DATA_MRI_NIFTI/derived/movies/test_frames"
MIN, MAX = 20, 45
BACKGROUND = "black"
RESOLUTION = (720, 720)
CMAP = "gray"
# -----------------------------------------------------------------------------
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}".format(OUTDIR))
nii = nb.load(FILE)
dims = nii.shape
data = nii.get_fdata()
# Normalize
data[data > MAX] = MAX
data -= MIN
data /= MAX - MIN
data[data < 0] = 0
data *= 255
# Prep pyvista plotter
p = pv.Plotter(window_size=RESOLUTION, off_screen=True)
opacity = np.ones(255)
opacity[0] = 0
p.add_volume(data, cmap="gray", opacity=opacity)
p.set_background(BACKGROUND)
# p.camera.roll = 0
p.camera_position = 'yz'
p.camera.elevation = 15
print("Roll : {}".format(p.camera.roll))
print("Elevation : {}".format(p.camera.elevation))
print("Azimuth : {}".format(p.camera.azimuth))
print("Position : {}".format(p.camera.position))
print("Focal point : {}".format(p.camera.focal_point))
print("Clip range : {}".format(p.camera.clipping_range))
CAMPOS_DEFAULT = p.camera_position
# Manipulate camera
# -----------------------------------------------------------------------------
p.camera_position = CAMPOS_DEFAULT
p.camera.elevation += 30
for i in range(90):
p.show(auto_close=False)
out_name = "03_azimuth-{}.png".format(str(i).zfill(3))
p.screenshot(os.path.join(OUTDIR, out_name))
p.camera.azimuth += 4
p.camera.azimuth %= 360
print("Finished.")
| 27.846154 | 177 | 0.654144 | [
"BSD-3-Clause"
] | ofgulban/meso-MRI | scripts/wip/anim-test_camera.py | 1,810 | Python |
from typing import List, TYPE_CHECKING
import numpy as np
import scipy.sparse
from qdrant_openapi_client.models.models import Distance
from docarray.math.helper import EPSILON
if TYPE_CHECKING:
from docarray.types import ArrayType
class QdrantStorageHelper:
@classmethod
def embedding_to_array(
cls, embedding: 'ArrayType', default_dim: int
) -> List[float]:
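        # documents without an embedding still need a vector to be stored as a
        # Qdrant point, so a random placeholder of the default dimensionality is used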
if embedding is None:
embedding = np.random.rand(default_dim)
else:
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
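        # nudge an all-zero embedding by EPSILON so that distance computations
        # (e.g. cosine similarity) do not degenerate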
if np.all(embedding == 0):
embedding = embedding + EPSILON
return embedding.tolist()
DISTANCES = {
'cosine': Distance.COSINE,
'euclidean': Distance.EUCLID,
'dot': Distance.DOT,
}
| 23.710526 | 56 | 0.662597 | [
"Apache-2.0"
] | fastflair/docarray | docarray/array/storage/qdrant/helper.py | 901 | Python |
#!/usr/bin/env python3
import torch
import torch.cuda.profiler as profiler
from apex import pyprof
class Foo(torch.jit.ScriptModule):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
@torch.jit.script_method
def forward(self, input):
return self.n*input + self.m
#Initialize pyprof after the JIT step
pyprof.nvtx.init()
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(Foo, 'forward')
foo = Foo(4)
foo.cuda()
x = torch.ones(4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x)
profiler.stop()
print(z)
| 21.53125 | 53 | 0.690856 | [
"BSD-3-Clause"
] | 58733511/apex | apex/pyprof/examples/jit/jit_script_method.py | 689 | Python |
from pathlib import Path
from typing import List, Optional, Dict, Union, Tuple, Literal, Sequence, Any
import fsspec
import numpy as np
from xarray import DataArray
from dataclasses import asdict, dataclass
import json
from ..io.mrc import mrc_to_dask
from ..io import read
import dask.array as da
import dacite
from xarray_multiscale.metadata.util import SpatialTransform
CONTAINER_TYPES = {'mrc', 'n5', 'precomputed'}
DTYPE_FORMATS = {"uint16": "n5", "uint8": "precomputed", "uint64": "n5"}
CONTENT_TYPES = {"em", "lm", "prediction", "segmentation", "analysis"}
ContainerTypes = Literal['n5', 'precomputed', 'mrc']
@dataclass
class VolumeStorageSpec:
kvStore: str
containerType: ContainerTypes
containerPath: str
dataPath: str
def toURI(self):
return f'{self.kvStore}://{Path(self.containerPath).with_suffix("." + self.containerType).joinpath(self.dataPath)}'
def __post_init__(self):
if self.containerType not in CONTAINER_TYPES:
raise ValueError(
f"containerType must be one of {CONTAINER_TYPES}"
)
@dataclass
class ContrastLimits:
min: float
max: float
def __post_init__(self):
if not self.min <= self.max:
raise ValueError('min must be less than or equal to max.')
@dataclass
class DisplaySettings:
contrastLimits: ContrastLimits
color: str = 'white'
invertColormap: bool = False
@classmethod
def fromDict(cls, d: Dict[str, Any]):
return dacite.from_dict(cls, d)
@dataclass
class DatasetView:
datasetName: str
name: str
description: str
position: Optional[Sequence[float]]
scale: Optional[float]
volumeKeys: Sequence[str]
@classmethod
def fromDict(cls, d: Dict[str, Any]):
return dacite.from_dict(cls, d)
@dataclass
class MultiscaleSpec:
reduction: str
depth: int
factors: Union[int, Sequence[int]]
@dataclass
class MeshSource:
path: str
name: str
datasetName: str
format: str
@dataclass
class VolumeSource:
path: str
name: str
datasetName: str
dataType: str
dimensions: Sequence[float]
transform: SpatialTransform
contentType: str
containerType: Optional[ContainerTypes]
displaySettings: DisplaySettings
description: str = ''
version: str="0"
tags: Optional[Sequence[str]] = None
def __post_init__(self):
assert self.contentType in CONTENT_TYPES
assert len(self.version) > 0
def toDataArray(self):
if Path(self.path).suffix == ".mrc":
array = mrc_to_dask(self.path, chunks=(1, -1, -1))
else:
r = read(self.path)
array = da.from_array(r, chunks=r.chunks)
coords = [
DataArray(
self.transform.translate[idx] + np.arange(array.shape[idx]) * self.transform.scale[idx],
dims=ax,
attrs= {'units': self.transform.units[idx]}
)
for idx, ax in enumerate(self.transform.axes)
]
return DataArray(array, coords=coords, name=self.name)
@classmethod
def fromDict(cls, d: Dict[str, Any]):
return dacite.from_dict(cls, d)
@dataclass
class DatasetIndex:
name: str
volumes: Sequence[VolumeSource]
meshes: Sequence[MeshSource]
views: Sequence[DatasetView]
@classmethod
def from_json(cls, fname: Union[str, Path], open_kwargs: dict = {}):
with fsspec.open(str(fname), mode='rt', **open_kwargs) as fh:
jblob = json.loads(fh.read())
return cls(**jblob)
def to_json(self, fname: Union[str, Path], open_kwargs: dict = {}) -> int:
jblob = json.dumps(asdict(self))
with fsspec.open(str(fname), mode='wt', **open_kwargs) as fh:
result = fh.write(jblob)
return result
@dataclass
class VolumeIngest:
source: VolumeSource
multiscaleSpec: MultiscaleSpec
storageSpec: VolumeStorageSpec
mutation: Optional[str] = None
@dataclass
class COSEMArrayAttrs:
name: str
transform: SpatialTransform
@classmethod
def fromDataArray(cls, data: DataArray) -> "COSEMArrayAttrs":
name = data.name
if name is not None:
return cls(str(name), SpatialTransform.fromDataArray((data)))
else:
raise ValueError('DataArray argument must have a valid name')
@dataclass
class OMEScaleAttrs:
path: str
transform: SpatialTransform
@dataclass
class OMEMultiscaleAttrs:
datasets: Sequence[OMEScaleAttrs]
@dataclass
class COSEMGroupAttrs:
name: str
multiscales: Sequence[OMEMultiscaleAttrs]
@dataclass
class N5PixelResolution:
dimensions: Sequence[float]
unit: str
@dataclass
class NeuroglancerGroupAttrs:
# see https://github.com/google/neuroglancer/issues/176#issuecomment-553027775
axes: Sequence[str]
units: Sequence[str]
scales: Sequence[Sequence[int]]
pixelResolution: N5PixelResolution
@dataclass
class MultiscaleGroupAttrs:
name: str
multiscales: Sequence[OMEMultiscaleAttrs]
axes: Sequence[str]
units: Sequence[str]
scales: Sequence[Sequence[int]]
pixelResolution: N5PixelResolution
def makeN5ArrayAttrs(dimensions: Sequence[float], unit: str) -> Dict[str, N5PixelResolution]:
return {'pixelResolution': N5PixelResolution(dimensions, unit)}
def makeMultiscaleGroupAttrs(name: str,
arrays: Sequence[DataArray],
array_paths: Sequence[str],
axis_order: str="F") -> MultiscaleGroupAttrs:
assert len(arrays) == len(array_paths)
cosemArrayAttrs = tuple(COSEMArrayAttrs.fromDataArray(a) for a in arrays)
axis_indexer = slice(None)
# neuroglancer wants the axes reported in fortran order
if axis_order == "F":
axis_indexer = slice(-1, None, -1)
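        # e.g. DataArray dims ('z', 'y', 'x') are reported to neuroglancer as ('x', 'y', 'z')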
axes: Tuple[str] = arrays[0].dims[axis_indexer]
scales = tuple(tuple(s.scale_factors)[axis_indexer] for s in arrays)
coords_reordered = tuple(arrays[0].coords[k] for k in axes)
units = tuple(d.units for d in coords_reordered)
# we need this for neuroglancer
pixelResolution = N5PixelResolution(dimensions=cosemArrayAttrs[0].transform.scale[axis_indexer], unit=units[0])
multiscales = OMEMultiscaleAttrs(datasets=[OMEScaleAttrs(path=ap, transform=attr.transform) for ap, attr in zip(array_paths, cosemArrayAttrs)])
result = MultiscaleGroupAttrs(name=name,
multiscales=[multiscales],
axes=axes,
units=units,
scales=scales,
pixelResolution=pixelResolution)
return result
@dataclass
class CompositeArrayAttrs:
name: str
transform: SpatialTransform
pixelResolution: N5PixelResolution
@classmethod
def fromDataArray(cls, data: DataArray):
cosemAttrs = COSEMArrayAttrs.fromDataArray(data)
pixelResolution = N5PixelResolution(cosemAttrs.transform.scale[::-1], unit=cosemAttrs.transform.units[0])
return cls(cosemAttrs.name, cosemAttrs.transform, pixelResolution) | 28.015564 | 147 | 0.659167 | [
"MIT"
] | trautmane/fibsem-tools | src/fibsem_tools/attrs/attrs.py | 7,200 | Python |
"""A dictionary of module names to pytype overlays.
Some libraries need custom overlays to provide useful type information. Pytype
has some built-in overlays, and additional overlays may be added to the overlays
dictionary. See overlay.py for the overlay interface and the *_overlay.py files
for examples.
Each entry in the overlays dictionary maps the module name to the overlay object.
"""
from pytype import abc_overlay
from pytype import asyncio_types_overlay
from pytype import collections_overlay
from pytype import functools_overlay
from pytype import future_overlay
from pytype import six_overlay
from pytype import sys_overlay
from pytype import typing_overlay
# Collection of module overlays, used by the vm to fetch an overlay
# instead of the module itself. Memoized in the vm itself.
overlays = {
"abc": abc_overlay.ABCOverlay,
"asyncio": asyncio_types_overlay.AsyncioOverlay,
"collections": collections_overlay.CollectionsOverlay,
"functools": functools_overlay.FunctoolsOverlay,
"future.utils": future_overlay.FutureUtilsOverlay,
"six": six_overlay.SixOverlay,
"sys": sys_overlay.SysOverlay,
"types": asyncio_types_overlay.TypesOverlay,
"typing": typing_overlay.TypingOverlay,
}
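# A hypothetical example of registering an additional overlay (the module and
# overlay class names below are illustrative assumptions; see overlay.py for
# the interface a custom overlay must implement):
#
#   from pytype import mylib_overlay
#   overlays["mylib"] = mylib_overlay.MyLibOverlay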
| 38.25 | 80 | 0.800654 | [
"Apache-2.0"
] | gs-jackal/pytype | pytype/overlay_dict.py | 1,224 | Python |
from bottle import route, run, template
@route('/hello/<name>')
def index(name):
return template('<b>Hello {{name}}</b>!', name=name)
run(host='localhost', port=8080)
| 19.333333 | 56 | 0.66092 | [
"MIT"
] | axeldeveloper/codigos | python/hello_bottle.py | 174 | Python |
''' Natural language understanding model based on multi-task learning.
This model is trained on two tasks: slot tagging and user intent prediction.
Inputs: user utterance, e.g. BOS w1 w2 ... EOS
Outputs: slot tags and user intents, e.g. O O B-moviename ... O\tinform+moviename
Author : Xuesong Yang
Email : [email protected]
Created Date: Dec. 31, 2016
'''
from DataSetCSVslotTagging import DataSetCSVslotTagging
from keras.layers import Input, LSTM, Dense, Dropout, merge, Embedding, TimeDistributed
from keras.models import Model
from utils import print_params, eval_slotTagging, eval_intentPredict, writeTxt, getNLUpred, getActPred, getTagPred, checkExistence, getNLUframeAccuracy, eval_actPred
import os
import numpy as np
np.random.seed(1983)
def writeUtterTagIntentTxt(utter_txt, tag_txt, intent_txt, target_fname):
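    # each output line has the form "<utterance>\t<tag1 tag2 ...>\t<intent1;intent2>",
    # with the "tag-"/"intent-" prefixes stripped from the labels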
with open(target_fname, 'wb') as f:
for (utter, tag, intent) in zip(utter_txt, tag_txt, intent_txt):
tag_new = [token.replace('tag-', '', 1) for token in tag.split()]
intent_new = [token.replace('intent-', '', 1)
for token in intent.split(';')]
new_line = '{}\t{}\t{}'.format(
utter, ' '.join(tag_new), ';'.join(intent_new))
f.write('{}\n'.format(new_line))
class SlotTaggingModel(object):
def __init__(self, **argparams):
self.train_data = argparams['train_data']
if self.train_data is not None:
assert isinstance(self.train_data, DataSetCSVslotTagging)
self.test_data = argparams['test_data']
if self.test_data is not None:
assert isinstance(self.test_data, DataSetCSVslotTagging)
self.dev_data = argparams['dev_data']
if self.dev_data is not None:
assert isinstance(self.dev_data, DataSetCSVslotTagging)
self.model_folder = argparams['model_folder']
if self.model_folder is None:
pid = argparams['pid']
self.model_folder = './model/slot_{}'.format(pid)
os.makedirs('{}/weights'.format(self.model_folder))
os.makedirs('{}/dev_results'.format(self.model_folder))
self.epoch_nb = argparams['epoch_nb']
self.batch_size = argparams['batch_size']
self.embedding_size = argparams['embedding_size']
self.hidden_size = argparams['hidden_size']
self.dropout = argparams['dropout_ratio']
self.optimizer = argparams['optimizer']
self.patience = argparams['patience']
self.loss = argparams['loss']
self.test_tag_flag = argparams['test_tag_only']
self.test_intent_flag = argparams['test_intent_only']
self.threshold = argparams['threshold']
self.weights_fname = argparams['weights_fname']
self.params = argparams
def _build(self):
print('Building Graph ...')
words_input = Input(shape=(self.maxlen_userUtter,),
dtype='int32', name='words_input')
# reserve 0 for masking, therefore vocab_size + 1
embeddings = Embedding(input_dim=self.word_vocab_size + 1,
output_dim=self.embedding_size,
input_length=self.maxlen_userUtter,
mask_zero=True)(words_input)
embeddings = Dropout(self.dropout)(embeddings)
lstm_forward = LSTM(output_dim=self.hidden_size,
return_sequences=True,
name='LSTM_forward')(embeddings)
lstm_forward = Dropout(self.dropout)(lstm_forward)
lstm_backward = LSTM(output_dim=self.hidden_size,
return_sequences=True,
go_backwards=True,
name='LSTM_backward')(embeddings)
lstm_backward = Dropout(self.dropout)(lstm_backward)
lstm_concat = merge([lstm_forward, lstm_backward],
mode='concat',
concat_axis=-1,
name='merge_bidirections')
slot_softmax_seq = TimeDistributed(Dense(
output_dim=self.userTag_vocab_size,
activation='softmax'), name='slot_output')(lstm_concat)
intent_summary = LSTM(output_dim=self.hidden_size,
return_sequences=False,
name='summarize_to_dense')(lstm_concat)
intent_summary = Dropout(self.dropout)(intent_summary)
# intent_softmax = Dense(output_dim=self.userIntent_vocab_size,
# activation='softmax', name='intent_output')(intent_summary)
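        # sigmoid + binary cross-entropy (see compile() below) is used instead of a
        # softmax because an utterance may carry several intents at once
        # (multi-label prediction with a decision threshold)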
intent_softmax = Dense(output_dim=self.userIntent_vocab_size,
activation='sigmoid', name='intent_output')(intent_summary)
self.model = Model(input=words_input, output=[
slot_softmax_seq, intent_softmax])
self.model.compile(optimizer=self.optimizer,
# metrics=['accuracy'],
sample_weight_mode={
'slot_output': 'temporal', 'intent_output': None},
loss={'slot_output': self.loss, 'intent_output': 'binary_crossentropy'})
def train(self):
print('Training model ...')
# load params
self.maxlen_userUtter = self.train_data.maxlen_userUtter
self.word_vocab_size = self.train_data.word_vocab_size
self.userIntent_vocab_size = self.train_data.userIntent_vocab_size
self.userTag_vocab_size = self.train_data.userTag_vocab_size
self.id2word = self.train_data.id2word
self.id2userTag = self.train_data.id2userTag
self.id2userIntent = self.train_data.id2userIntent
self.userTag2id = self.train_data.userTag2id
other_npz = '{}/other_vars.npz'.format(self.model_folder)
train_vars = {'id2userTag': self.id2userTag,
'id2word': self.id2word,
'id2userIntent': self.id2userIntent,
'userTag2id': self.userTag2id,
'userTag_vocab_size': self.userTag_vocab_size,
'userIntent_vocab_size': self.userIntent_vocab_size,
'word_vocab_size': self.word_vocab_size,
'maxlen_userUtter': self.maxlen_userUtter}
np.savez_compressed(other_npz, **train_vars)
self.params['maxlen_userUtter'] = self.maxlen_userUtter
self.params['word_vocab_size'] = self.word_vocab_size
self.params['userTag_vocab_size'] = self.userTag_vocab_size
self.params['userIntent_vocab_size'] = self.userIntent_vocab_size
print_params(self.params)
# build model graph, save graph and plot graph
self._build()
self._plot_graph()
graph_yaml = '{}/graph-arch.yaml'.format(self.model_folder)
with open(graph_yaml, 'w') as fyaml:
fyaml.write(self.model.to_yaml())
# load train data
X_train = self.train_data.userUtter_encodePad
tag_train = self.train_data.userTag_1hotPad
intent_train = self.train_data.userIntent_vecBin
train_utter_txt = self.train_data.userUtter_txt
train_intent_txt = self.train_data.userIntent_txt
train_tag_txt = self.train_data.userTag_txt
train_target_fname = '{}/train.target'.format(self.model_folder)
writeUtterTagIntentTxt(train_utter_txt, train_tag_txt, train_intent_txt, train_target_fname)
# load dev data
X_dev = self.dev_data.userUtter_encodePad
tag_dev = self.dev_data.userTag_1hotPad
intent_dev = self.dev_data.userIntent_vecBin
dev_utter_txt = self.dev_data.userUtter_txt
dev_intent_txt = self.dev_data.userIntent_txt
dev_tag_txt = self.dev_data.userTag_txt
dev_target_fname = '{}/dev.target'.format(self.model_folder)
writeUtterTagIntentTxt(dev_utter_txt, dev_tag_txt, dev_intent_txt, dev_target_fname)
# get mask matrix for train and dev set
mask_array_train = np.zeros_like(X_train)
mask_array_train[X_train != 0] = 1
mask_array_dev = np.zeros_like(X_dev)
mask_array_dev[X_dev != 0] = 1
# jointly training
for ep in xrange(self.epoch_nb):
print('<Epoch {}>'.format(ep))
self.model.fit(x=X_train,
y={'slot_output': tag_train,
'intent_output': intent_train},
sample_weight={'slot_output': mask_array_train,
'intent_output': None},
batch_size=self.batch_size, nb_epoch=1, verbose=2)
tag_probs, intent_probs = self.model.predict(X_dev)
# calculate token-level scores
precision_tag, recall_tag, fscore_tag, accuracy_frame_tag = eval_slotTagging(tag_probs, mask_array_dev,
tag_dev, self.userTag2id['tag-O'])
print('SlotTagging: ep={}, precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(ep, precision_tag, recall_tag, fscore_tag, accuracy_frame_tag))
precision_intent, recall_intent, fscore_intent, accuracy_frame_intent, threshold = eval_intentPredict(intent_probs,
intent_dev)
print('Intent Prediction: ep={}, precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}, threshold={:.4f}'.format(ep, precision_intent, recall_intent, fscore_intent, accuracy_frame_intent, threshold))
accuracy_frame_both = getNLUframeAccuracy(tag_probs, mask_array_dev, tag_dev, intent_probs, intent_dev, threshold)
print('NLU Frame: ep={}, accuracy={:.4f}'.format(ep, accuracy_frame_both))
dev_tag_pred_txt, dev_intent_pred_txt = getNLUpred(tag_probs, mask_array_dev, self.id2userTag, intent_probs, threshold, self.id2userIntent)
dev_results_fname = '{}/dev_results/dev_ep={}.pred'.format(self.model_folder, ep)
writeUtterTagIntentTxt(dev_utter_txt, dev_tag_pred_txt, dev_intent_pred_txt, dev_results_fname)
print('Write dev results: {}'.format(dev_results_fname))
weights_fname = '{}/weights/ep={}_tagF1={:.4f}frameAcc={:.4f}_intentF1={:.4f}frameAcc={:.4f}th={:.4f}.h5'.format(self.model_folder, ep, fscore_tag, accuracy_frame_tag, fscore_intent, accuracy_frame_intent, threshold)
print('Saving Model: {}'.format(weights_fname))
self.model.save_weights(weights_fname, overwrite=True)
def _plot_graph(self):
from keras.utils import visualize_util
graph_png = '{}/graph-plot.png'.format(self.model_folder)
visualize_util.plot(self.model,
to_file=graph_png,
show_shapes=True,
show_layer_names=True)
def predict(self):
print('Predicting ...')
result_folder = '{}/test_result'.format(self.model_folder)
if not os.path.exists(result_folder):
os.makedirs(result_folder)
# write user utters
utter_fname = '{}/utter.txt'.format(result_folder)
if not os.path.exists(utter_fname):
utter_txt = self.test_data.userUtter_txt
writeTxt(utter_txt, utter_fname, prefix='', delimiter=None)
print('\ttest_utter={}'.format(utter_fname))
# load test data and calculate posterior probs.
X_test = self.test_data.userUtter_encodePad
tag_probs, intent_probs = self.model.predict(X_test) # a tuple, slot_tags and intents
# make prediction
if self.test_intent_flag:
assert self.threshold is not None, 'Argument required: --threshold'
intent_probs_fname = '{}/intentProb_{}.npz'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])
np.savez_compressed(intent_probs_fname, probs=intent_probs)
print('\tintent_probs={}'.format(intent_probs_fname))
# write prediction test results
pred_intent_fname = '{}/intent_{}.pred'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])
pred_intent_txt = getActPred(intent_probs, self.threshold, self.id2userIntent)
writeTxt(pred_intent_txt, pred_intent_fname, prefix='intent-', delimiter=';')
print('\tintent_pred={}'.format(pred_intent_fname))
# write target test
target_intent_fname = '{}/intent_test.target'.format(result_folder)
target_intent = self.test_data.userIntent_txt
writeTxt(target_intent, target_intent_fname, prefix='intent-', delimiter=';')
print('\tintent_target={}'.format(target_intent_fname))
# calculate performance scores
preds_indicator, precision, recall, fscore, accuracy_frame = eval_actPred(intent_probs,
self.test_data.userIntent_vecBin,
self.threshold)
print('IntentPred: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(precision, recall, fscore, accuracy_frame))
if self.test_tag_flag:
tag_probs_fname = '{}/tagProb_{}.npz'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])
np.savez_compressed(tag_probs_fname, probs=tag_probs)
print('\ttag_probs={}'.format(tag_probs_fname))
# write prediction results
pred_tag_fname = '{}/tag_{}.pred'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])
mask_test = np.zeros_like(X_test)
mask_test[X_test != 0] = 1
pred_tag_txt = getTagPred(tag_probs, mask_test, self.id2userTag)
writeTxt(pred_tag_txt, pred_tag_fname, prefix='tag-', delimiter=None)
print('\ttag_pred={}'.format(pred_tag_fname))
# write target
target_tag_fname = '{}/tag_test.target'.format(result_folder)
target_tag = self.test_data.userTag_txt
writeTxt(target_tag, target_tag_fname, prefix='tag-', delimiter=None)
print('\ttag_target={}'.format(target_tag_fname))
# calculate performance scores
precision, recall, fscore, accuracy_frame = eval_slotTagging(tag_probs, mask_test,
self.test_data.userTag_1hotPad, self.userTag2id['tag-O'])
print('SlotTagging: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(precision, recall, fscore, accuracy_frame))
def load_model(self):
print('Loading model ...')
# check existence of params
assert os.path.exists(self.model_folder), 'model_fold is not found: {}'.format(self.model_folder)
assert self.weights_fname is not None, 'Argument required: --weights-file'
checkExistence(self.weights_fname)
model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
checkExistence(model_graph)
checkExistence(model_train_vars)
from keras.models import model_from_yaml
with open(model_graph, 'r') as fgraph:
self.model = model_from_yaml(fgraph.read())
self.model.load_weights(self.weights_fname)
npzfile = np.load(model_train_vars)
self.maxlen_userUtter = np.int32(npzfile['maxlen_userUtter'][()])
self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
self.userIntent_vocab_size = np.int32(
npzfile['userIntent_vocab_size'][()])
self.id2userTag = npzfile['id2userTag'][()]
self.id2word = npzfile['id2word'][()]
self.id2userIntent = npzfile['id2userIntent'][()]
self.userTag2id = npzfile['userTag2id'][()]
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-npz', dest='data_npz',
help='.npz file including instances of DataSetCSVslotTagging for train, dev and test')
parser.add_argument('--loss', dest='loss',
default='categorical_crossentropy',
help='objective function')
parser.add_argument('--optimizer', dest='optimizer',
default='adam', help='optimizer')
parser.add_argument('--epoch-nb', dest='epoch_nb', type=int,
                        default=300, help='number of epochs')
    parser.add_argument('--embedding-size', dest='embedding_size', type=int,
                        default=512, help='the dimension of word embeddings.')
parser.add_argument('--patience', dest='patience', type=int,
default=10, help='the patience for early stopping criteria')
parser.add_argument('--batch-size', dest='batch_size', type=int,
default=32, help='batch size')
parser.add_argument('--hidden-size', dest='hidden_size', type=int,
default=128, help='the number of hidden units in recurrent layer')
parser.add_argument('--dropout-ratio', dest='dropout_ratio',
type=float, default=0.5, help='dropout ratio')
parser.add_argument('--model-folder', dest='model_folder',
help='the folder contains graph.yaml, weights.h5, and other_vars.npz')
parser.add_argument('--test-tag', dest='test_tag_only', action='store_true',
help='only perform user Tagging test if this option is activated.')
parser.add_argument('--test-intent', dest='test_intent_only', action='store_true',
help='only perform user intent test if this option is activated.')
parser.add_argument('--train', dest='train_only', action='store_true',
help='only perform training if this option is activated.')
parser.add_argument('--weights-file', dest='weights_fname', help='.h5 weights file.')
parser.add_argument('--threshold', dest='threshold', type=float, help='float number of threshold for multi-label prediction decision.')
args = parser.parse_args()
argparams = vars(args)
# early stop criteria are different for two tasks, therefore one model is
# chosen for each.
test_tag_only = argparams['test_tag_only']
test_intent_only = argparams['test_intent_only']
train_only = argparams['train_only']
assert train_only or test_tag_only or test_intent_only, 'Arguments required: either --train, --test-tag, or --test-intent'
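    # Illustrative invocations (paths, folders and the threshold value are placeholders):
    #   training:    python SlotTaggingModel_multitask.py --data-npz data.npz --train
    #   tag test:    python SlotTaggingModel_multitask.py --data-npz data.npz --test-tag --model-folder <folder> --weights-file <weights>.h5
    #   intent test: python SlotTaggingModel_multitask.py --data-npz data.npz --test-intent --model-folder <folder> --weights-file <weights>.h5 --threshold 0.5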
pid = os.getpid()
argparams['pid'] = pid
npz_fname = argparams['data_npz']
checkExistence(npz_fname)
data_npz = np.load(npz_fname)
if train_only: # train model
argparams['train_data'] = data_npz['train_data'][()]
argparams['dev_data'] = data_npz['dev_data'][()]
argparams['test_data'] = None
model = SlotTaggingModel(**argparams)
model.train()
else:
        # train_only is False, so test_tag_only or test_intent_only is True
# need to load model
argparams['train_data'] = None
argparams['dev_data'] = None
argparams['test_data'] = None
if argparams['model_folder'] is None:
raise Exception('Argument required: --model-folder')
model = SlotTaggingModel(**argparams)
model.load_model()
# test
if test_tag_only or test_intent_only:
model.test_data = data_npz['test_data'][()]
model.predict()
| 57.956395 | 228 | 0.62853 | [
"MIT"
] | XuesongYang/end2end_dialog | SlotTaggingModel_multitask.py | 19,937 | Python |
'''
PipedImagerPQ is a graphics viewer application written in PyQt
that receives its images and commands primarily from another
application through a pipe. A limited number of commands are
provided by the viewer itself to allow saving and some manipulation
of the displayed image. The controlling application, however, may
be unaware of these modifications made to the image.
PipedImagerPQProcess is used to create and run a PipedImagerPQ.
This package was developed by the Thermal Modeling and Analysis
Project (TMAP) of the National Oceanographic and Atmospheric
Administration's (NOAA) Pacific Marine Environmental Lab (PMEL).
'''
from __future__ import print_function
import sys
import os
import time
import signal
try:
import sip
except ImportError:
import PyQt4.sip as sip
try:
sip.setapi('QVariant', 2)
except Exception:
pass
# First try to import PyQt5, then try PyQt4 if that fails
try:
import PyQt5
QT_VERSION = 5
except ImportError:
import PyQt4
QT_VERSION = 4
# Now that the PyQt version is determined, import the parts
# allowing any import errors to propagate out
if QT_VERSION == 5:
from PyQt5.QtCore import Qt, QPointF, QRectF, QSize, QTimer
from PyQt5.QtGui import QBrush, QColor, QImage, QPainter, \
QPalette, QPen, QPixmap, QPolygonF
from PyQt5.QtWidgets import QAction, QApplication, QDialog, \
QFileDialog, QLabel, QMainWindow, \
QMessageBox, QPushButton, QScrollArea
else:
from PyQt4.QtCore import Qt, QPointF, QRectF, QSize, QTimer
from PyQt4.QtGui import QAction, QApplication, QBrush, QColor, QDialog, \
QFileDialog, QImage, QLabel, QMainWindow, \
QMessageBox, QPainter, QPalette, QPen, QPixmap, \
QPolygonF, QPushButton, QScrollArea
import multiprocessing
from pipedviewer import WINDOW_CLOSED_MESSAGE
from pipedviewer.cmndhelperpq import CmndHelperPQ
from pipedviewer.scaledialogpq import ScaleDialogPQ
class PipedImagerPQ(QMainWindow):
'''
A PyQt graphics viewer that receives images and commands through
a pipe.
A command is a dictionary with string keys. For example,
{ "action":"save",
"filename":"ferret.png",
"fileformat":"png" }
The command { "action":"exit" } will shutdown the viewer.
'''
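    # A minimal usage sketch (illustrative only; it mirrors the test driver at
    # the bottom of this module): the controlling process keeps the sending
    # ends of two Pipes and drives the viewer with command dictionaries, e.g.
    #   (cmndrecv, cmndsend) = multiprocessing.Pipe(False)
    #   (rspdrecv, rspdsend) = multiprocessing.Pipe(False)
    #   viewerproc = PipedImagerPQProcess(cmndrecv, rspdsend)
    #   viewerproc.start()
    #   cmndsend.send({ "action":"setTitle", "title":"Example" })
    #   cmndsend.send({ "action":"show" })
    #   cmndsend.send({ "action":"exit" })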
def __init__(self, cmndpipe, rspdpipe):
'''
Create a PyQt viewer which reads commands from the Pipe
cmndpipe and writes responses back to rspdpipe.
'''
super(PipedImagerPQ, self).__init__()
self.__cmndpipe = cmndpipe
self.__rspdpipe = rspdpipe
# ignore Ctrl-C
signal.signal(signal.SIGINT, signal.SIG_IGN)
# unmodified image for creating the scene
self.__sceneimage = None
# bytearray of data for the above image
self.__scenedata = None
# flag set if in the process of reading image data from commands
self.__loadingimage = False
# width and height of the unmodified scene image
# when the image is defined
# initialize the width and height to values that will create
# a viewer (mainWindow) of the right size
self.__scenewidth = int(10.8 * self.physicalDpiX())
self.__sceneheight = int(8.8 * self.physicalDpiY())
# by default pay attention to any alpha channel values in colors
self.__noalpha = False
# initial default color for the background (opaque white)
self.__lastclearcolor = QColor(0xFFFFFF)
self.__lastclearcolor.setAlpha(0xFF)
# scaling factor for creating the displayed scene
self.__scalefactor = 1.0
# automatically adjust the scaling factor to fit the window frame?
self.__autoscale = True
# minimum label width and height (for minimum scaling factor)
# and minimum image width and height (for error checking)
self.__minsize = 128
# create the label, that will serve as the canvas, in a scrolled area
self.__scrollarea = QScrollArea(self)
self.__label = QLabel(self.__scrollarea)
# set the initial label size and other values for the scrolled area
self.__label.setMinimumSize(self.__scenewidth, self.__sceneheight)
self.__label.resize(self.__scenewidth, self.__sceneheight)
# setup the scrolled area
self.__scrollarea.setWidget(self.__label)
self.__scrollarea.setBackgroundRole(QPalette.Dark)
self.setCentralWidget(self.__scrollarea)
# default file name and format for saving the image
self.__lastfilename = "ferret.png"
self.__lastformat = "png"
# command helper object
self.__helper = CmndHelperPQ(self)
# create the menubar
self.__scaleact = QAction(self.tr("&Scale"), self,
shortcut=self.tr("Ctrl+S"),
statusTip=self.tr("Scale the image (canvas and image change size)"),
triggered=self.inquireSceneScale)
self.__saveact = QAction(self.tr("Save &As..."), self,
shortcut=self.tr("Ctrl+A"),
statusTip=self.tr("Save the image to file"),
triggered=self.inquireSaveFilename)
self.__redrawact = QAction(self.tr("&Redraw"), self,
shortcut=self.tr("Ctrl+R"),
statusTip=self.tr("Clear and redraw the image"),
triggered=self.redrawScene)
self.__aboutact = QAction(self.tr("&About"), self,
statusTip=self.tr("Show information about this viewer"),
triggered=self.aboutMsg)
self.__aboutqtact = QAction(self.tr("About &Qt"), self,
statusTip=self.tr("Show information about the Qt library"),
triggered=self.aboutQtMsg)
self.createMenus()
# set the initial size of the viewer
self.__framedelta = 4
mwwidth = self.__scenewidth + self.__framedelta
mwheight = self.__sceneheight + self.__framedelta \
+ self.menuBar().height() \
+ self.statusBar().height()
self.resize(mwwidth, mwheight)
# check the command queue any time there are no window events to deal with
self.__timer = QTimer(self)
self.__timer.timeout.connect(self.checkCommandPipe)
self.__timer.setInterval(0)
self.__timer.start()
def createMenus(self):
'''
Create the menu items for the viewer
using the previously created actions.
'''
menuBar = self.menuBar()
sceneMenu = menuBar.addMenu(menuBar.tr("&Image"))
sceneMenu.addAction(self.__scaleact)
sceneMenu.addAction(self.__saveact)
sceneMenu.addAction(self.__redrawact)
helpMenu = menuBar.addMenu(menuBar.tr("&Help"))
helpMenu.addAction(self.__aboutact)
helpMenu.addAction(self.__aboutqtact)
def resizeEvent(self, event):
'''
Monitor resizing in case auto-scaling of the image is selected.
'''
if self.__autoscale:
if self.autoScaleScene():
# continue with the window resize
event.accept()
else:
# another resize coming in, so ignore this one
event.ignore()
else:
# continue with the window resize
event.accept()
def closeEvent(self, event):
'''
Clean up and send the WINDOW_CLOSED_MESSAGE on the response pipe
before closing the window.
'''
self.__timer.stop()
self.__cmndpipe.close()
try:
try:
self.__rspdpipe.send(WINDOW_CLOSED_MESSAGE)
finally:
self.__rspdpipe.close()
except Exception:
pass
event.accept()
def exitViewer(self):
'''
Close and exit the viewer.
'''
self.close()
def aboutMsg(self):
QMessageBox.about(self, self.tr("About PipedImagerPQ"),
self.tr("\n" \
"PipedImagerPQ is a graphics viewer application that receives its " \
"displayed image and commands primarily from another application " \
"through a pipe. A limited number of commands are provided by the " \
"viewer itself to allow saving and some manipulation of the " \
"displayed image. The controlling application, however, may be " \
"unaware of these modifications made to the image. " \
"\n\n" \
"PipedImagerPQ was developed by the Thermal Modeling and Analysis " \
"Project (TMAP) of the National Oceanographic and Atmospheric " \
"Administration's (NOAA) Pacific Marine Environmental Lab (PMEL). "))
def aboutQtMsg(self):
QMessageBox.aboutQt(self, self.tr("About Qt"))
def ignoreAlpha(self):
'''
Return whether the alpha channel in colors should always be ignored.
'''
return self.__noalpha
def updateScene(self):
'''
Clear the displayed scene using self.__lastclearcolor,
then draw the scaled current image.
'''
# get the scaled scene size
labelwidth = int(self.__scalefactor * self.__scenewidth + 0.5)
labelheight = int(self.__scalefactor * self.__sceneheight + 0.5)
# Create the new pixmap for the label to display
newpixmap = QPixmap(labelwidth, labelheight)
newpixmap.fill(self.__lastclearcolor)
        if self.__sceneimage is not None:
# Draw the scaled image to the pixmap
mypainter = QPainter(newpixmap)
trgrect = QRectF(0.0, 0.0, float(labelwidth),
float(labelheight))
srcrect = QRectF(0.0, 0.0, float(self.__scenewidth),
float(self.__sceneheight))
mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor)
mypainter.end()
# Assign the new pixmap to the label
self.__label.setPixmap(newpixmap)
# set the label size and values
# so the scrollarea knows of the new size
self.__label.setMinimumSize(labelwidth, labelheight)
self.__label.resize(labelwidth, labelheight)
# update the label from the new pixmap
self.__label.update()
def clearScene(self, bkgcolor=None):
'''
Deletes the scene image and fills the label with bkgcolor.
If bkgcolor is None or an invalid color, the color used is
the one used from the last clearScene or redrawScene call
with a valid color (or opaque white if a color has never
been specified).
'''
# get the color to use for clearing (the background color)
if bkgcolor:
if bkgcolor.isValid():
self.__lastclearcolor = bkgcolor
# Remove the image and its bytearray
self.__sceneimage = None
self.__scenedata = None
# Update the scene label using the current clearing color and image
self.updateScene()
def redrawScene(self, bkgcolor=None):
'''
Clear and redraw the displayed scene.
'''
# get the background color
if bkgcolor:
if bkgcolor.isValid():
self.__lastclearcolor = bkgcolor
# Update the scene label using the current clearing color and image
QApplication.setOverrideCursor(Qt.WaitCursor)
self.statusBar().showMessage( self.tr("Redrawing image") )
try:
self.updateScene()
finally:
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
def resizeScene(self, width, height):
'''
Resize the scene to the given width and height in units of pixels.
        If the size changes, this deletes the current image and clears the
displayed scene.
'''
newwidth = int(width + 0.5)
if newwidth < self.__minsize:
newwidth = self.__minsize
newheight = int(height + 0.5)
if newheight < self.__minsize:
newheight = self.__minsize
if (newwidth != self.__scenewidth) or (newheight != self.__sceneheight):
# set the new size for the empty scene
self.__scenewidth = newwidth
self.__sceneheight = newheight
# If auto-scaling, set scaling factor to 1.0 and resize the window
if self.__autoscale:
self.__scalefactor = 1.0
barheights = self.menuBar().height() + self.statusBar().height()
self.resize(newwidth + self.__framedelta,
newheight + self.__framedelta + barheights)
# clear the scene with the last clearing color
self.clearScene(None)
def loadNewSceneImage(self, imageinfo):
'''
Create a new scene image from the information given in this
and subsequent dictionaries imageinfo. The image is created
from multiple calls to this function since there is a limit
on the size of a single object passed through a pipe.
The first imageinfo dictionary given when creating an image
must define the following key and value pairs:
"width": width of the image in pixels
"height": height of the image in pixels
"stride": number of bytes in one line of the image
in the bytearray
The scene image data is initialized to all zero (transparent)
at this time.
This initialization call must be followed by (multiple) calls
to this method with imageinfo dictionaries defining the key
and value pairs:
"blocknum": data block number (1, 2, ... numblocks)
"numblocks": total number of image data blocks
"startindex": index in the bytearray of image data
where this block of image data starts
"blockdata": this block of data as a bytearray
On receipt of the last block of data (blocknum == numblocks)
the scene image will be created and the scene will be updated.
Raises:
KeyError - if one of the above keys is not given
ValueError - if a value for a key is not valid
'''
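        # Sender-side sketch (illustrative; _test_pipedimagerpq below does the
        # same thing): after the initializing {"width","height","stride"}
        # command, the raw ARGB bytes are sent in fixed-size chunks, e.g.
        #   blocksize = 4000
        #   numblocks = (height * stride + blocksize - 1) // blocksize
        #   for k in range(numblocks):
        #       cmndpipe.send({ "action":"newImage", "blocknum":k+1,
        #                       "numblocks":numblocks, "startindex":k*blocksize,
        #                       "blockdata":imgdata[k*blocksize:(k+1)*blocksize] })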
if not self.__loadingimage:
            # prepare for new image data from subsequent calls
# get dimensions of the new image
myimgwidth = int( imageinfo["width"] )
myimgheight = int( imageinfo["height"] )
myimgstride = int( imageinfo["stride"] )
if (myimgwidth < self.__minsize) or (myimgheight < self.__minsize):
raise ValueError("image width and height cannot be less than %s" % str(self.__minsize))
# Newer PyQt versions allow separate specification of the stride
if myimgstride != 4 * myimgwidth:
raise ValueError("image stride is not four times the image width")
# create the bytearray to contain the new scene data
# automatically initialized to zero
self.__scenedata = bytearray(myimgstride * myimgheight)
self.__scenewidth = myimgwidth
self.__sceneheight = myimgheight
# set the flag for subsequent calls to this method
self.__loadingimage = True
# change the cursor to warn the user this may take some time
QApplication.setOverrideCursor(Qt.WaitCursor)
# put up an appropriate status message
self.statusBar().showMessage( self.tr("Loading new image") )
return
# loading an image; add the next block of data
myblocknum = int( imageinfo["blocknum"] )
mynumblocks = int( imageinfo["numblocks"] )
mystartindex = int( imageinfo["startindex"] )
myblockdata = imageinfo["blockdata"]
if (myblocknum < 1) or (myblocknum > mynumblocks):
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
raise ValueError("invalid image data block number or number of blocks")
if (mystartindex < 0) or (mystartindex >= len(self.__scenedata)):
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
raise ValueError("invalid start index for an image data block")
myblocksize = len(myblockdata)
myendindex = mystartindex + myblocksize
if (myblocksize < 1) or (myendindex > len(self.__scenedata)):
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
raise ValueError("invalid length of an image data block")
# update the status message to show progress
self.statusBar().showMessage( self.tr("Loading new image (block %s of %s)" % \
(str(myblocknum),str(mynumblocks))) )
# assign the data
self.__scenedata[mystartindex:myendindex] = myblockdata
# if this is the last block of data, create and display the scene image
if myblocknum == mynumblocks:
self.__loadingimage = False
self.statusBar().showMessage( self.tr("Creating new image") )
try:
self.__sceneimage = QImage(self.__scenedata,
self.__scenewidth,
self.__sceneheight,
QImage.Format_ARGB32_Premultiplied)
self.statusBar().showMessage( self.tr("Drawing new image") )
# update the displayed scene in the label
self.updateScene()
finally:
# clear the status message
self.statusBar().clearMessage()
# restore the cursor back to normal
QApplication.restoreOverrideCursor()
def inquireSceneScale(self):
'''
Prompt the user for the desired scaling factor for the scene.
'''
labelwidth = int(self.__scenewidth * self.__scalefactor + 0.5)
labelheight = int(self.__sceneheight * self.__scalefactor + 0.5)
scaledlg = ScaleDialogPQ(self.__scalefactor, labelwidth, labelheight,
self.__minsize, self.__minsize, self.__autoscale, self)
if scaledlg.exec_():
(newscale, autoscale, okay) = scaledlg.getValues()
if okay:
if autoscale:
self.__autoscale = True
self.autoScaleScene()
else:
self.__autoscale = False
self.scaleScene(newscale, False)
def autoScaleScene(self):
'''
Selects a scaling factor that maximizes the scene within the window
frame without requiring scroll bars. Intended to be called when
        the window size is changed by the user and auto-scaling is turned on.
Returns:
True if scaling of this scene is done (no window resize)
            False if a window resize command was issued
'''
barheights = self.menuBar().height() + self.statusBar().height()
# get the size for the central widget
cwheight = self.height() - barheights - self.__framedelta
heightsf = float(cwheight) / float(self.__sceneheight)
cwwidth = self.width() - self.__framedelta
widthsf = float(cwwidth) / float(self.__scenewidth)
if heightsf < widthsf:
factor = heightsf
else:
factor = widthsf
newcwheight = int(factor * self.__sceneheight + 0.5)
newcwwidth = int(factor * self.__scenewidth + 0.5)
# if the window does not have the correct aspect ratio, resize it so
# it will; this will generate another call to this method. Otherwise,
# scale the scene and be done.
if self.isMaximized() or \
( (abs(cwheight - newcwheight) <= self.__framedelta) and \
(abs(cwwidth - newcwwidth) <= self.__framedelta) ):
self.scaleScene(factor, False)
return True
else:
self.resize(newcwwidth + self.__framedelta,
newcwheight + self.__framedelta + barheights)
return False
def scaleScene(self, factor, resizewin):
'''
Scales both the horizontal and vertical directions by factor.
Scaling factors are not accumulative. So if the scene was
already scaled, that scaling is "removed" before this scaling
factor is applied. If resizewin is True, the main window is
resized to accommodate this new scaled scene size.
If factor is zero, just switch to auto-scaling at the current
window size. If factor is negative, rescale using the absolute
value (possibly resizing the window) then switch to auto-scaling.
'''
fltfactor = float(factor)
if fltfactor != 0.0:
if resizewin:
# from command - turn off autoscaling for the following
# then turn back on if appropriate
self.__autoscale = False
newfactor = abs(fltfactor)
newlabwidth = int(newfactor * self.__scenewidth + 0.5)
newlabheight = int(newfactor * self.__sceneheight + 0.5)
if (newlabwidth < self.__minsize) or (newlabheight < self.__minsize):
# Set to minimum size
if self.__scenewidth <= self.__sceneheight:
newfactor = float(self.__minsize) / float(self.__scenewidth)
else:
newfactor = float(self.__minsize) / float(self.__sceneheight)
newlabwidth = int(newfactor * self.__scenewidth + 0.5)
newlabheight = int(newfactor * self.__sceneheight + 0.5)
oldlabwidth = int(self.__scalefactor * self.__scenewidth + 0.5)
oldlabheight = int(self.__scalefactor * self.__sceneheight + 0.5)
if (newlabwidth != oldlabwidth) or (newlabheight != oldlabheight):
# Set the new scaling factor
self.__scalefactor = newfactor
# Update the scene label using the current clearing color and image
QApplication.setOverrideCursor(Qt.WaitCursor)
self.statusBar().showMessage( self.tr("Scaling image") )
try:
self.updateScene()
finally:
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
if resizewin:
# resize the main window (if possible)
barheights = self.menuBar().height() + self.statusBar().height()
mwheight = newlabheight + barheights + self.__framedelta
mwwidth = newlabwidth + self.__framedelta
# Do not exceed the available real estate on the screen.
# If autoscaling is in effect, the resize will trigger
# any required adjustments.
scrnrect = QApplication.desktop().availableGeometry()
if mwwidth > 0.95 * scrnrect.width():
mwwidth = int(0.9 * scrnrect.width() + 0.5)
if mwheight > 0.95 * scrnrect.height():
mwheight = int(0.9 * scrnrect.height() + 0.5)
self.resize(mwwidth, mwheight)
if fltfactor <= 0.0:
# From command - turn on autoscaling
self.__autoscale = True
            self.autoScaleScene()
def inquireSaveFilename(self):
'''
Prompt the user for the name of the file into which to save the scene.
The file format will be determined from the filename extension.
'''
formattypes = [ ( "png",
"PNG - Portable Networks Graphics (*.png)" ),
( "jpeg",
"JPEG - Joint Photographic Experts Group (*.jpeg *.jpg *.jpe)" ),
( "tiff",
"TIFF - Tagged Image File Format (*.tiff *.tif)" ),
( "bmp",
"BMP - Windows Bitmap (*.bmp)" ),
( "ppm",
"PPM - Portable Pixmap (*.ppm)" ),
( "xpm",
"XPM - X11 Pixmap (*.xpm)" ),
( "xbm",
"XBM - X11 Bitmap (*.xbm)" ), ]
filters = ";;".join( [ t[1] for t in formattypes ] )
if QT_VERSION == 5:
# getSaveFileName; tr returns Python unicode strings in PyQt5/Python3
(fileName, fileFilter) = QFileDialog.getSaveFileName(self,
self.tr("Save the current image as "), self.tr(self.__lastfilename), self.tr(filters))
else:
# getSaveFileNameAndFilter; tr returns QStrings in PyQt4
(fileName, fileFilter) = QFileDialog.getSaveFileNameAndFilter(self,
self.tr("Save the current image as "), self.tr(self.__lastfilename), self.tr(filters))
if fileName:
for (fmt, fmtQName) in formattypes:
if self.tr(fmtQName) == fileFilter:
fileFormat = fmt
break
else:
raise RuntimeError("Unexpected file format name '%s'" % fileFilter)
self.saveSceneToFile(fileName, fileFormat, None, None)
self.__lastfilename = fileName
self.__lastformat = fileFormat
def saveSceneToFile(self, filename, imageformat, transparent, rastsize):
'''
Save the current scene to the named file.
If imageformat is empty or None, the format is guessed from
the filename extension.
If transparent is False, the entire scene is initialized
to the last clearing color.
        If given, rastsize is the pixel size of the saved image.
If rastsize is not given, the saved image will be saved
at the current scaled image size.
'''
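        # A full "save" command, as processCommand forwards it here, might look
        # like the following (illustrative; the exact key layout expected by
        # getSizeFromCmnd is an assumption, since that helper lives in
        # CmndHelperPQ):
        #   { "action":"save", "filename":"ferret.png", "fileformat":"png",
        #     "rastsize":{ "width":1100, "height":850 } }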
# This could be called when there is no image present.
# If this is the case, ignore the call.
        if self.__sceneimage is None:
return
if not imageformat:
# Guess the image format from the filename extension
# This is only done to silently change gif to png
fileext = ( os.path.splitext(filename)[1] ).lower()
if fileext == '.gif':
myformat = 'gif'
else:
# let QImage figure out the format
myformat = None
else:
myformat = imageformat.lower()
if myformat == 'gif':
# Silently convert gif filename and format to png
myformat = 'png'
myfilename = os.path.splitext(filename)[0] + ".png"
else:
myfilename = filename
        # set the cursor and status message to indicate a save is happening
QApplication.setOverrideCursor(Qt.WaitCursor)
self.statusBar().showMessage( self.tr("Saving image") )
try:
if rastsize:
imagewidth = int(rastsize.width() + 0.5)
imageheight = int(rastsize.height() + 0.5)
else:
imagewidth = int(self.__scenewidth * self.__scalefactor + 0.5)
imageheight = int(self.__sceneheight * self.__scalefactor + 0.5)
myimage = QImage( QSize(imagewidth, imageheight),
QImage.Format_ARGB32_Premultiplied )
# Initialize the image
if not transparent:
# Clear the image with self.__lastclearcolor
fillint = self.__helper.computeARGB32PreMultInt(self.__lastclearcolor)
else:
fillint = 0
myimage.fill(fillint)
# draw the scaled scene to this QImage
mypainter = QPainter(myimage)
trgrect = QRectF(0.0, 0.0, float(imagewidth),
float(imageheight))
srcrect = QRectF(0.0, 0.0, float(self.__scenewidth),
float(self.__sceneheight))
mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor)
mypainter.end()
# save the image to file
if not myimage.save(myfilename, myformat):
raise ValueError("Unable to save the plot as " + myfilename)
finally:
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
def checkCommandPipe(self):
'''
Get and perform commands waiting in the pipe.
Stop when no more commands or if more than 50
milliseconds have passed.
'''
try:
starttime = time.clock()
# Wait up to 2 milliseconds waiting for a command.
# This prevents unchecked spinning when there is
# nothing to do (Qt immediately calling this method
# again only for this method to immediately return).
while self.__cmndpipe.poll(0.002):
cmnd = self.__cmndpipe.recv()
self.processCommand(cmnd)
# Continue to try to process commands until
# more than 50 milliseconds have passed.
# This reduces Qt overhead when there are lots
# of commands waiting in the queue.
if (time.clock() - starttime) > 0.050:
break
except EOFError:
# Assume PyFerret has shut down
self.exitViewer()
except Exception:
# Some problem, but presumably still functional
(exctype, excval) = sys.exc_info()[:2]
try:
if excval:
self.__rspdpipe.send("**ERROR %s: %s" % (str(exctype), str(excval)))
else:
self.__rspdpipe.send("**ERROR %s" % str(exctype))
except Exception:
pass
def processCommand(self, cmnd):
'''
Examine the action of cmnd and call the appropriate
        method to deal with this command. Raises a ValueError
        if the "action" key is missing.
'''
try:
cmndact = cmnd["action"]
except KeyError:
raise ValueError("Unknown command '%s'" % str(cmnd))
if cmndact == "clear":
try:
bkgcolor = self.__helper.getColorFromCmnd(cmnd)
except KeyError:
bkgcolor = None
self.clearScene(bkgcolor)
elif cmndact == "exit":
self.exitViewer()
elif cmndact == "hide":
self.showMinimized()
elif cmndact == "screenInfo":
scrnrect = QApplication.desktop().availableGeometry()
info = ( self.physicalDpiX(), self.physicalDpiY(),
scrnrect.width(), scrnrect.height() )
self.__rspdpipe.send(info)
elif cmndact == "redraw":
try:
bkgcolor = self.__helper.getColorFromCmnd(cmnd)
except KeyError:
bkgcolor = None
self.redrawScene(bkgcolor)
elif cmndact == "rescale":
self.scaleScene(float(cmnd["factor"]), True)
elif cmndact == "resize":
mysize = self.__helper.getSizeFromCmnd(cmnd)
self.resizeScene(mysize.width(), mysize.height())
elif cmndact == "newImage":
self.loadNewSceneImage(cmnd)
elif cmndact == "save":
filename = cmnd["filename"]
fileformat = cmnd.get("fileformat", None)
try:
bkgcolor = self.__helper.getColorFromCmnd(cmnd)
except KeyError:
bkgcolor = None
rastsize = self.__helper.getSizeFromCmnd(cmnd["rastsize"])
self.saveSceneToFile(filename, fileformat, bkgcolor, rastsize)
elif cmndact == "setTitle":
self.setWindowTitle(cmnd["title"])
elif cmndact == "imgname":
myvalue = cmnd.get("name", None)
if myvalue:
self.__lastfilename = myvalue
myvalue = cmnd.get("format", None)
if myvalue:
self.__lastformat = myvalue.lower()
elif cmndact == "show":
if not self.isVisible():
self.show()
elif cmndact == "noalpha":
# ignore any alpha channel values in colors
self.__noalpha = True
else:
raise ValueError("Unknown command action %s" % str(cmndact))
class PipedImagerPQProcess(multiprocessing.Process):
'''
A Process specifically tailored for creating a PipedImagerPQ.
'''
def __init__(self, cmndpipe, rspdpipe):
'''
Create a Process that will produce a PipedImagerPQ
attached to the given Pipes when run.
'''
super(PipedImagerPQProcess,self).__init__(group=None, target=None, name='PipedImagerPQ')
self.__cmndpipe = cmndpipe
self.__rspdpipe = rspdpipe
self.__app = None
self.__viewer = None
def run(self):
'''
Create a PipedImagerPQ that is attached
to the Pipe of this instance.
'''
self.__app = QApplication(["PipedImagerPQ"])
self.__viewer = PipedImagerPQ(self.__cmndpipe, self.__rspdpipe)
myresult = self.__app.exec_()
sys.exit(myresult)
#
# The following are for testing this module
#
class _CommandSubmitterPQ(QDialog):
'''
Testing dialog for controlling the addition of commands to a pipe.
Used for testing PipedImagerPQ in the same process as the viewer.
'''
def __init__(self, parent, cmndpipe, rspdpipe, cmndlist):
'''
Create a QDialog with a single QPushButton for controlling
the submission of commands from cmndlist to cmndpipe.
'''
super(_CommandSubmitterPQ,self).__init__(parent)
self.__cmndlist = cmndlist
self.__cmndpipe = cmndpipe
self.__rspdpipe = rspdpipe
self.__nextcmnd = 0
self.__button = QPushButton("Submit next command", self)
self.__button.pressed.connect(self.submitNextCommand)
self.show()
def submitNextCommand(self):
'''
Submit the next command from the command list to the command pipe,
or shutdown if there are no more commands to submit.
'''
try:
cmndstr = str(self.__cmndlist[self.__nextcmnd])
if len(cmndstr) > 188:
cmndstr = cmndstr[:188] + '...'
print("Command: %s" % cmndstr)
self.__cmndpipe.send(self.__cmndlist[self.__nextcmnd])
self.__nextcmnd += 1
while self.__rspdpipe.poll(0.1):
print("Response: %s" % str(self.__rspdpipe.recv()))
except IndexError:
self.__rspdpipe.close()
self.__cmndpipe.close()
self.close()
def _test_pipedimagerpq():
# vertices of a pentagon (roughly) centered in a 1000 x 1000 square
pentagonpts = ( (504.5, 100.0), (100.0, 393.9),
(254.5, 869.4), (754.5, 869.4),
(909.0, 393.9), )
linepts = ( (350, 50),
(200, 150),
(400, 250),
(300, 350),
(150, 250),
(100, 450) )
# start PyQt
testapp = QApplication(["PipedImagerPQ"])
# create the list of commands to submit
drawcmnds = []
drawcmnds.append( { "action":"setTitle", "title":"Tester" } )
drawcmnds.append( { "action":"show" } )
drawcmnds.append( { "action":"clear", "color":"black"} )
drawcmnds.append( { "action":"screenInfo"} )
# create the image to be displayed
testimage = QImage(500, 500, QImage.Format_ARGB32_Premultiplied)
# initialize a black background
testimage.fill(0xFF000000)
# draw some things in the image
testpainter = QPainter(testimage)
testpainter.setBrush( QBrush(QColor(0, 255, 0, 128), Qt.SolidPattern) )
testpainter.setPen( QPen(QBrush(QColor(255, 0, 0, 255), Qt.SolidPattern),
5.0, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin) )
testpainter.drawRect( QRectF(5.0, 255.0, 240.0, 240.0) )
testpainter.setBrush( QBrush(QColor(0, 0, 255, 255), Qt.SolidPattern) )
testpainter.setPen( QPen(QBrush(QColor(0, 0, 0, 255), Qt.SolidPattern),
5.0, Qt.DashLine, Qt.RoundCap, Qt.RoundJoin) )
testpainter.drawPolygon( QPolygonF(
[ QPointF(.25 * ptx, .25 * pty + 250) for (ptx, pty) in pentagonpts ] ) )
testpainter.setBrush( Qt.NoBrush )
testpainter.setPen( QPen(QBrush(QColor(255, 255, 255, 255), Qt.SolidPattern),
3.0, Qt.DashLine, Qt.RoundCap, Qt.RoundJoin) )
testpainter.drawPolyline( QPolygonF(
[ QPointF(pts, pty) for (pts, pty) in linepts ] ) )
testpainter.end()
# add the image command
testimgwidth = testimage.width()
testimgheight = testimage.height()
testimgstride = testimage.bytesPerLine()
# not a good way to get the pixel data
testimgdata = bytearray(testimgheight * testimgstride)
k = 0
for pty in range(testimgheight):
for ptx in range(testimgwidth):
pixval = testimage.pixel(ptx, pty)
(aval, rgbval) = divmod(pixval, 256 * 256 * 256)
(rval, gbval) = divmod(rgbval, 256 * 256)
(gval, bval) = divmod(gbval, 256)
testimgdata[k] = bval
k += 1
testimgdata[k] = gval
k += 1
testimgdata[k] = rval
k += 1
testimgdata[k] = aval
k += 1
testblocksize = 4000
testnumblocks = (testimgheight * testimgstride + testblocksize - 1) // testblocksize
drawcmnds.append( { "action":"newImage",
"width":testimgwidth,
"height":testimgheight,
"stride":testimgstride } )
for k in range(testnumblocks):
if k < (testnumblocks - 1):
blkdata = testimgdata[k*testblocksize:(k+1)*testblocksize]
else:
blkdata = testimgdata[k*testblocksize:]
drawcmnds.append( { "action":"newImage",
"blocknum":k+1,
"numblocks":testnumblocks,
"startindex":k*testblocksize,
"blockdata":blkdata } )
# finish the command list
drawcmnds.append( { "action":"show" } )
drawcmnds.append( { "action":"exit" } )
# create a PipedImagerPQ in this process
(cmndrecvpipe, cmndsendpipe) = multiprocessing.Pipe(False)
(rspdrecvpipe, rspdsendpipe) = multiprocessing.Pipe(False)
testviewer = PipedImagerPQ(cmndrecvpipe, rspdsendpipe)
# create a command submitter dialog
tester = _CommandSubmitterPQ(testviewer, cmndsendpipe,
rspdrecvpipe, drawcmnds)
tester.show()
# let it all run
testresult = testapp.exec_()
if testresult != 0:
sys.exit(testresult)
if __name__ == "__main__":
_test_pipedimagerpq()
| 43.410734 | 103 | 0.587274 | [
"Unlicense"
] | Jhongesell/PyFerret | pviewmod/pipedimagerpq.py | 39,634 | Python |
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from select2_many_to_many.forms import TestForm
from select2_many_to_many.models import TestModel
class UpdateView(generic.UpdateView):
model = TestModel
form_class = TestForm
template_name = 'select2_outside_admin.html'
success_url = reverse_lazy('select2_crispy_forms')
def get_object(self):
return TestModel.objects.first()
| 27.625 | 54 | 0.789593 | [
"MIT"
] | MyFinanceInc/django-autocomplete-light | test_project/select2_outside_admin/views.py | 442 | Python |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from typing import List, Optional
from omegaconf import OmegaConf
from hydra.core.object_type import ObjectType
from hydra.plugins.config_source import ConfigLoadError, ConfigResult, ConfigSource
class FileConfigSource(ConfigSource):
def __init__(self, provider: str, path: str) -> None:
if path.find("://") == -1:
path = f"{self.scheme()}://{path}"
super().__init__(provider=provider, path=path)
@staticmethod
def scheme() -> str:
return "file"
def load_config(
self, config_path: str, package_override: Optional[str] = None
) -> ConfigResult:
normalized_config_path = self._normalize_file_name(config_path)
full_path = os.path.realpath(os.path.join(self.path, normalized_config_path))
if not os.path.exists(full_path):
raise ConfigLoadError(f"FileConfigSource: Config not found : {full_path}")
with open(full_path) as f:
header_text = f.read(512)
header = ConfigSource._get_header_dict(header_text)
self._update_package_in_header(
header, normalized_config_path, package_override
)
f.seek(0)
cfg = OmegaConf.load(f)
return ConfigResult(
config=self._embed_config(cfg, header["package"]),
path=f"{self.scheme()}://{self.path}",
provider=self.provider,
header=header,
)
def is_group(self, config_path: str) -> bool:
full_path = os.path.realpath(os.path.join(self.path, config_path))
return os.path.isdir(full_path)
def is_config(self, config_path: str) -> bool:
config_path = self._normalize_file_name(config_path)
full_path = os.path.realpath(os.path.join(self.path, config_path))
return os.path.isfile(full_path)
def list(self, config_path: str, results_filter: Optional[ObjectType]) -> List[str]:
files: List[str] = []
full_path = os.path.realpath(os.path.join(self.path, config_path))
for file in os.listdir(full_path):
file_path = os.path.join(config_path, file)
self._list_add_result(
files=files,
file_path=file_path,
file_name=file,
results_filter=results_filter,
)
return sorted(list(set(files)))
| 37.923077 | 88 | 0.627992 | [
"MIT"
] | MULXCODE/hydra | hydra/_internal/core_plugins/file_config_source.py | 2,465 | Python |
class SpaceAge(object):
ORBITAL_PERIOD = 31557600 # seconds
def __init__(self, seconds):
self.seconds = seconds
def on_mercury(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 0.2408467, 2)
def on_venus(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 0.61519726, 2)
def on_earth(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD), 2)
def on_mars(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 1.8808158, 2)
def on_jupiter(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 11.862615, 2)
def on_saturn(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 29.447498, 2)
def on_uranus(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 84.016846, 2)
def on_neptune(self) -> float:
return round(self.seconds / float(self.ORBITAL_PERIOD) / 164.79132, 2)
| 33.258065 | 79 | 0.659554 | [
"MIT"
] | PlugaruT/exercism-playground | python/space-age/space_age.py | 1,031 | Python |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import support
from heat.tests import common
class SupportStatusTest(common.HeatTestCase):
def test_valid_status(self):
for sstatus in support.SUPPORT_STATUSES:
previous = support.SupportStatus(version='test_version')
status = support.SupportStatus(
status=sstatus,
message='test_message',
version='test_version',
previous_status=previous,
)
self.assertEqual(sstatus, status.status)
self.assertEqual('test_message', status.message)
self.assertEqual('test_version', status.version)
self.assertEqual(previous, status.previous_status)
self.assertEqual({
'status': sstatus,
'message': 'test_message',
'version': 'test_version',
'previous_status': {'status': 'SUPPORTED',
'message': None,
'version': 'test_version',
'previous_status': None},
}, status.to_dict())
def test_invalid_status(self):
status = support.SupportStatus(
status='RANDOM',
message='test_message',
version='test_version',
previous_status=support.SupportStatus()
)
self.assertEqual(support.UNKNOWN, status.status)
self.assertEqual('Specified status is invalid, defaulting to UNKNOWN',
status.message)
self.assertIsNone(status.version)
self.assertIsNone(status.previous_status)
self.assertEqual({
'status': 'UNKNOWN',
'message': 'Specified status is invalid, defaulting to UNKNOWN',
'version': None,
'previous_status': None,
}, status.to_dict())
def test_previous_status(self):
sstatus = support.SupportStatus(
status=support.DEPRECATED,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.SUPPORTED,
version='2015.1'
)
)
self.assertEqual(support.DEPRECATED, sstatus.status)
self.assertEqual('5.0.0', sstatus.version)
self.assertEqual(support.SUPPORTED, sstatus.previous_status.status)
self.assertEqual('2015.1', sstatus.previous_status.version)
self.assertEqual({'status': 'DEPRECATED',
'version': '5.0.0',
'message': None,
'previous_status': {'status': 'SUPPORTED',
'version': '2015.1',
'message': None,
'previous_status': None}},
sstatus.to_dict())
def test_invalid_previous_status(self):
ex = self.assertRaises(ValueError,
support.SupportStatus, previous_status='YARRR')
self.assertEqual('previous_status must be SupportStatus '
'instead of %s' % str, str(ex))
| 41.186813 | 78 | 0.564301 | [
"Apache-2.0"
] | HyunJin-Jeong/heat | heat/tests/test_support.py | 3,748 | Python |
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by David O'Brien
# Copyright (c) 2017 David O'Brien
#
# License: MIT
#
"""Exports the Vcom plugin class."""
from SublimeLinter.lint import Linter
class Vcom(Linter):
"""Provides an interface to vcom (Mentor Modelsim)."""
syntax = ('vhdl')
cmd = 'vcom -2008 -work work @'
tempfile_suffix = 'vhd'
# SAMPLE ERRORS
# ** Error: (vcom-13069) .\fname.v(9): near "reg": syntax error, unexpected reg, expecting ';' or ','.
# ** Error: (vcom-13069) .\fname.v(9): Unknown identifier "var": syntax error, unexpected reg, expecting ';' or ','.
# ** Error (suppressible): .\fname.sv(46): (vlog-2388) 'var' already declared in this scope (mname).
# ** Error: (vlog-13069) .\fname.sv(45): near "==": syntax error, unexpected ==, expecting ';' or ','.
regex = (
r'^\*\* (((?P<error>Error)|(?P<warning>Warning))' # Error
r'( \(suppressible\))?: )' # Maybe suppressible
r'(\([a-z]+-[0-9]+\) )?' # Error code - sometimes before
r'([^\(]*\((?P<line>[0-9]+)\): )' # File and line
r'(\([a-z]+-[0-9]+\) )?' # Error code - sometimes after
r'(?P<message>' # Start of message
r'(((near|Unknown identifier|Undefined variable):? )?' # Near/Unknown/Unidentified
r'["\'](?P<near>[\w=:;\.]+)["\']' # Identifier
r'[ :.]*)?' # Near terminator
r'.*)' # End of message
)
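    # Worked example (derived from the first sample error above): for
    #   ** Error: (vcom-13069) .\fname.v(9): near "reg": syntax error, ...
    # the regex captures error='Error', line='9', near='reg' and
    # message='near "reg": syntax error, ...'; split_match() below then
    # prefixes the message with '[vcom]'.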
def split_match(self, match):
"""Override this method to prefix the error message with the lint binary name."""
match, line, col, error, warning, message, near = super().split_match(match)
if match:
message = '[vcom] ' + message
return match, line, col, error, warning, message, near
| 43 | 120 | 0.503876 | [
"MIT"
] | dave2pi/SublimeLinter-contrib-vcom | linter.py | 2,064 | Python |
import paho.mqtt.client as mqtt
import time
import argparse
from tinydb import TinyDB, Query
from tinyrecord import transaction
import logging
import sys
import json
import threading
import ssl
from random import randint
CA_ROOT_CERT_FILE = "ag-certificate/AmazonRootCA1.pem"
THING_CERT_FILE = "ag-certificate/..."
THING_PRIVATE_KEY = "ag-certificate/..."
# init args parser
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"MQTT_broker", metavar="MQTT_broker", type=str, help="Address of the MQTT broker"
)
args = parser.parse_args()
# init logger
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/{1}.log".format("log", f"agenceur"))
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
# init opaque DB
db_opaque = TinyDB("opaque.json")
# init clear measures DB
db_measures = TinyDB("measures.json")
db_measures.truncate()
lock = threading.Lock() # on received message
def on_message(client, userdata, message):
with lock:
logger.debug(
"rcvd: " + message.topic + "/" + str(message.payload.decode("utf-8"))
)
if message.topic == "addToPool":
# store in DB
logger.info(f"storing payload")
db_opaque.insert({"entry": str(message.payload.decode("utf-8"))})
if message.topic == "requestPool":
asking = str(message.payload.decode("utf-8"))
logger.info(f"received pool request from {asking}")
completePool = db_opaque.all()
# truncate because we will save it again?
db_opaque.truncate()
            # don't send if the pool is empty
if len(completePool):
to_send = []
if len(completePool) > 10:
for i in range(10):
to_send.append(completePool.pop(0))
else:
to_send = completePool.copy()
completePool.clear()
for left in completePool:
db_opaque.insert(left)
#logger.info(f"to_send: {to_send}")
table_json = json.dumps(to_send)
logger.info(f"publishing table to getPool{asking}, len={len(table_json)}, n={len(to_send)}")
client.publish(f"getPool{asking}", table_json, qos=1)
if message.topic == "measures":
j = json.loads(message.payload.decode("utf-8"))
logger.info(f"m: {message.payload}")
db_measures.insert({"entry": message.payload.decode("utf-8")})
logger.info(f"received measure {j['MUID']}")
# connecting to MQTT broker
logger.info(f"Connecting to broker at {args.MQTT_broker}")
client = mqtt.Client("Agenceur")
client.tls_set(CA_ROOT_CERT_FILE, certfile=THING_CERT_FILE, keyfile=THING_PRIVATE_KEY)#, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
client.connect(args.MQTT_broker, 8883)
# client.enable_logger()
# start receive thread
client.loop_start()
# subscribe to
# * addToPool: endpoint for opaque payload
# * requestPool: endpoint for opaque pool request from devices
# * measures: endpoint for clear-data measures
client.subscribe("addToPool")
client.subscribe("requestPool")
client.subscribe("measures")
# register receive routine
client.on_message = on_message
# only on event execution
while True:
time.sleep(1)
client.loop_stop()
| 32.068966 | 165 | 0.661022 | [
"MIT"
] | alexismarquet/MSE-TM-nRF9160 | application/AWS-ag.py | 3,720 | Python |
from Graphs import *
G1 = Graph(NodeSize=.2)
G1.addNode([-2,0])
G1.addNode([-2,1.3])
G1.addNode([-2.7,-1])
G1.addNode([-2.2,-2.3])
G1.addEdges([0,0,0,2],[1,2,3,3])
G2 = Graph(NodeSize=.2)
G2.addNode([-1,0])
G2.addNode([0,0])
G2.addNode([1,.5])
G2.addNode([2,-.5])
G2.addNode([-1.5,.5])
G2.addEdges([0,1,1,0],[1,2,3,4])
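# addGraphRoot grafts a copy of graph GB onto graph GA: GB is translated so that
# its node r2 lands on GA's node r1, node r2 itself is dropped (it merges with
# r1), the remaining GB nodes and their adjacency are appended to GA, and every
# former neighbour of r2 is reconnected to r1. The reconnection indexing appears
# to assume r2 == 0, which is the only way it is called below.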
def addGraphRoot(GA,GB,r1,r2):
r = [i.copy() for i in GB.pos]
x = GA.pos[r1][0]-GB.pos[r2][0]
y = GA.pos[r1][1]-GB.pos[r2][1]
for z in range(len(r)):
r[z][0] += x
r[z][1] += y
del r[r2]
G = GB.Mat.copy()
rt = G[:,r2]
G = np.delete(G,r2,0)
G = np.delete(G,r2,1)
GA.addNodes(r)
GA.Mat[GA.size-GB.size+1:GA.size,GA.size-GB.size+1:GA.size] = G
for pos,ed in enumerate(rt):
if ed == 1:
GA.addEdges([r1],[GA.size-GB.size+pos])
for i in range(G1.size):
addGraphRoot(G1,G2,i,0)
print(edgeDict(G1.Mat))
makeCanvas(xlim=[-4,4],ylim=[-4,4],size=[10,10])
G1.drawNodes()
G1.drawLines()
G1.drawText()
 | 20.377358 | 68 | 0.513889 | [
"MIT"
] | AlexanderFraebel/GraphDrawing | GraphProducts.py | 1,080 | Python |
# -*- coding: utf-8 -*-
"""
Doors
AOE rider
"""
# Adding to the system path is needed
# because no longer in parent directory
# and I want to run this file as a script
import sys, os
sys.path.append(os.path.abspath('../'))
import farmbot as fb
class Farmer_Doors(fb.Farmbot):
def __init__(self):
fb.Farmbot.__init__(self,'blue','../')
def wave1(self):
res = self.advancestart()
if res < 0:
return -1
# Skills selection (may be empty)
# Attack
res = self.attack()
if res < 0:
return -1
# Card selection (pick 3)
self.usecard(self.xy_npc)
self.usecard(self.xy_card4)
self.usecard(self.xy_card3)
return 0
def wave2(self):
res = self.advancewave()
if res < 0:
return -1
# Skills selection (may be empty)
# Attack
res = self.attack()
if res < 0:
return -1
# Card selection (pick 3)
self.usecard(self.xy_npb)
self.usecard(self.xy_card3)
self.usecard(self.xy_card4)
return 0
def wave3(self):
res = self.advancewave()
if res < 0:
return -1
# Skills selection (may be empty)
# Attack
res = self.attack()
if res < 0:
return -1
# Card selection (pick 3)
self.usecard(self.xy_npa)
self.usecard(self.xy_card2)
self.usecard(self.xy_card3)
return 0
def farm(self,nruns=1):
self.runs = 0
self.refills = 0
self.refilltype = 'gapple' # [rapple,gapple,sapple,bapple]
self.supportce = 'teatime' # [lunchtime,training,lesson,monalisa,eventspecific]
self.supportservant = 'skadi' # [waver,skadi]
self.saveframe = False
while True:
# Start quest (set it up for the farmer)
# Repeat quest no longer uses the party screen
# Battle procedure Wave1
res = self.wave1()
if res < 0:
return -1
# Battle prodedure Wave2
res = self.wave2()
if res < 0:
return -1
# Battle prodedure Wave3
res = self.wave3()
if res < 0:
return -1
# Finished run
res = self.finishbattle()
if res < 0:
return -1
self.runs += 1
# Exit out to main menu if finished
if self.runs >= nruns:
res = self.norepeatquest()
break
# Repeat quest if not done (automatic refills)
res = self.repeatquestrefill()
if res < 0:
return -1
# Select new support
res = self.selectsupport()
if res < 0:
return -1
return self.runs
def farmalarm(self, nruns=1):
res = self.farm(nruns)
print(res)
self.playalarm()
return
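# Typical interactive use (illustrative): after construction and activate(), a
# fixed number of runs can be farmed with an alarm at the end, e.g.
#   farmer.farmalarm(nruns=3)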
if __name__ == "__main__":
farmer = Farmer_Doors()
    farmer.activate()
 | 23.637168 | 83 | 0.594534 | [
"MIT"
] | fryougi/farmbot | nodes/doors.py | 2,671 | Python |
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
from Configuration.Eras.Modifier_stage2L1Trigger_2017_cff import stage2L1Trigger_2017
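# Each helper below takes an existing cms.Process, modifies it in place and
# returns it, so it can be chained from a cmsDriver.py configuration through
# the --customise mechanism (illustrative; the exact module path depends on
# where this file sits in the release area), e.g.
#   process = L1TReEmulFromRAW(process)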
def L1TCaloStage2ParamsForHW(process):
process.load("L1Trigger.L1TCalorimeter.caloStage2Params_HWConfig_cfi")
return process
def L1TAddBitwiseLayer1(process):
from L1Trigger.L1TCaloLayer1.simCaloStage2Layer1Digis_cfi import simCaloStage2Layer1Digis as simCaloStage2BitwiseLayer1Digis
from L1Trigger.L1TCalorimeter.simCaloStage2Digis_cfi import simCaloStage2Digis as simCaloStage2BitwiseDigis
process.simCaloStage2BitwiseLayer1Digis = simCaloStage2BitwiseLayer1Digis.clone()
process.simCaloStage2BitwiseLayer1Digis.ecalToken = cms.InputTag("ecalDigis:EcalTriggerPrimitives")
process.simCaloStage2BitwiseDigis = simCaloStage2BitwiseDigis.clone()
process.simCaloStage2BitwiseDigis.towerToken = cms.InputTag("simCaloStage2BitwiseLayer1Digis")
process.SimL1TCalorimeter = cms.Sequence( process.simCaloStage2Layer1Digis + process.simCaloStage2Digis + process.simCaloStage2BitwiseLayer1Digis + process.simCaloStage2BitwiseDigis)
from L1Trigger.L1TNtuples.l1UpgradeTree_cfi import l1UpgradeTree
process.l1UpgradeBitwiseTree = l1UpgradeTree.clone()
process.l1UpgradeBitwiseTree.egToken = cms.untracked.InputTag("simCaloStage2BitwiseDigis")
process.l1UpgradeBitwiseTree.tauTokens = cms.untracked.VInputTag("simCaloStage2BitwiseDigis")
process.l1UpgradeBitwiseTree.jetToken = cms.untracked.InputTag("simCaloStage2BitwiseDigis")
process.l1UpgradeBitwiseTree.muonToken = cms.untracked.InputTag("simGmtStage2Digis")
process.l1UpgradeBitwiseTree.sumToken = cms.untracked.InputTag("simCaloStage2BitwiseDigis")
process.l1ntuplebitwise = cms.Path(
process.l1UpgradeBitwiseTree
)
process.schedule.append(process.l1ntuplebitwise)
print "# modified L1TReEmul: "
print "# {0}".format(process.L1TReEmul)
return process
# As of 80X, this ES configuration is needed for *data* GTs (mc tags work w/o)
def L1TEventSetupForHF1x1TPs(process):
process.es_pool_hf1x1 = cms.ESSource(
"PoolDBESSource",
#process.CondDBSetup,
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(record = cms.string("HcalLutMetadataRcd"),
tag = cms.string("HcalLutMetadata_HFTP_1x1")
),
cms.PSet(record = cms.string("HcalElectronicsMapRcd"),
tag = cms.string("HcalElectronicsMap_HFTP_1x1")
)
),
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
authenticationMethod = cms.untracked.uint32(0)
)
process.es_prefer_es_pool_hf1x1 = cms.ESPrefer("PoolDBESSource", "es_pool_hf1x1")
return process
def L1TReEmulFromRAW2015(process):
process.load('L1Trigger.Configuration.SimL1Emulator_cff')
process.load('L1Trigger.Configuration.CaloTriggerPrimitives_cff')
process.simEcalTriggerPrimitiveDigis.Label = 'ecalDigis'
process.simHcalTriggerPrimitiveDigis.inputLabel = cms.VInputTag(
cms.InputTag('hcalDigis'),
cms.InputTag('hcalDigis')
)
process.L1TReEmul = cms.Sequence(process.simEcalTriggerPrimitiveDigis * process.simHcalTriggerPrimitiveDigis * process.SimL1Emulator)
process.simDtTriggerPrimitiveDigis.digiTag = 'muonDTDigis'
process.simCscTriggerPrimitiveDigis.CSCComparatorDigiProducer = cms.InputTag( 'muonCSCDigis', 'MuonCSCComparatorDigi')
process.simCscTriggerPrimitiveDigis.CSCWireDigiProducer = cms.InputTag( 'muonCSCDigis', 'MuonCSCWireDigi' )
if stage2L1Trigger.isChosen():
process.simTwinMuxDigis.RPC_Source = cms.InputTag('muonRPCDigis')
# When available, this will switch to TwinMux input Digis:
process.simTwinMuxDigis.DTDigi_Source = cms.InputTag("dttfDigis")
process.simTwinMuxDigis.DTThetaDigi_Source = cms.InputTag("dttfDigis")
process.simOmtfDigis.srcRPC = cms.InputTag('muonRPCDigis')
process.simBmtfDigis.DTDigi_Source = cms.InputTag("simTwinMuxDigis")
process.simBmtfDigis.DTDigi_Theta_Source = cms.InputTag("dttfDigis")
process.simEmtfDigis.CSCInput = cms.InputTag("csctfDigis")
process.simEmtfDigis.RPCInput = cms.InputTag('muonRPCDigis')
process.simOmtfDigis.srcCSC = cms.InputTag("csctfDigis")
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag("ecalDigis:EcalTriggerPrimitives")
process.L1TReEmulPath = cms.Path(process.L1TReEmul)
process.schedule.append(process.L1TReEmulPath)
        # quiet warning about missing Stage-2 payloads, since they won't reliably exist in 2015 data.
if hasattr(process, "caloStage2Digis"):
process.caloStage2Digis.MinFeds = cms.uint32(0)
if hasattr(process, "gmtStage2Digis"):
process.gmtStage2Digis.MinFeds = cms.uint32(0)
if hasattr(process, "gtStage2Digis"):
process.gtStage2Digis.MinFeds = cms.uint32(0)
else:
process.simRctDigis.ecalDigis = cms.VInputTag('simEcalTriggerPrimitiveDigis')
process.simRctDigis.hcalDigis = cms.VInputTag('simHcalTriggerPrimitiveDigis')
process.simRpcTriggerDigis.label = 'muonRPCDigis'
process.simRpcTechTrigDigis.RPCDigiLabel = 'muonRPCDigis'
process.L1TReEmulPath = cms.Path(process.L1TReEmul)
process.schedule.append(process.L1TReEmulPath)
print "# L1TReEmul sequence: "
print "# {0}".format(process.L1TReEmul)
print "# {0}".format(process.schedule)
return process
def L1TReEmulMCFromRAW2015(process):
L1TReEmulFromRAW2015(process)
if stage2L1Trigger.isChosen():
process.simEmtfDigis.CSCInput = cms.InputTag('simCscTriggerPrimitiveDigis','MPCSORTED')
process.simOmtfDigis.srcCSC = cms.InputTag('simCscTriggerPrimitiveDigis','MPCSORTED')
return process
def L1TReEmulFromRAW2015simCaloTP(process):
L1TReEmulFromRAW2015(process)
if stage2L1Trigger.isChosen():
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag("simEcalTriggerPrimitiveDigis")
return process
def L1TReEmulFromRAW2016(process):
process.load('L1Trigger.Configuration.SimL1Emulator_cff')
process.load('L1Trigger.Configuration.CaloTriggerPrimitives_cff')
process.simEcalTriggerPrimitiveDigis.Label = 'ecalDigis'
process.simHcalTriggerPrimitiveDigis.inputLabel = cms.VInputTag(
cms.InputTag('hcalDigis'),
cms.InputTag('hcalDigis')
)
process.simHcalTriggerPrimitiveDigis.inputUpgradeLabel = cms.VInputTag(
cms.InputTag('hcalDigis'),
cms.InputTag('hcalDigis')
)
process.simCscTriggerPrimitiveDigis.CSCComparatorDigiProducer = cms.InputTag( 'muonCSCDigis', 'MuonCSCComparatorDigi')
process.simCscTriggerPrimitiveDigis.CSCWireDigiProducer = cms.InputTag( 'muonCSCDigis', 'MuonCSCWireDigi' )
process.L1TReEmul = cms.Sequence(process.simEcalTriggerPrimitiveDigis * process.simHcalTriggerPrimitiveDigis * process.SimL1Emulator)
if stage2L1Trigger.isChosen():
#cutlist=['simDtTriggerPrimitiveDigis','simCscTriggerPrimitiveDigis']
#for b in cutlist:
# process.SimL1Emulator.remove(getattr(process,b))
# TwinMux
process.simTwinMuxDigis.RPC_Source = cms.InputTag('RPCTwinMuxRawToDigi')
process.simTwinMuxDigis.DTDigi_Source = cms.InputTag('twinMuxStage2Digis:PhIn')
process.simTwinMuxDigis.DTThetaDigi_Source = cms.InputTag('twinMuxStage2Digis:ThIn')
# BMTF
process.simBmtfDigis.DTDigi_Source = cms.InputTag('simTwinMuxDigis')
process.simBmtfDigis.DTDigi_Theta_Source = cms.InputTag('bmtfDigis')
# OMTF
process.simOmtfDigis.srcRPC = cms.InputTag('muonRPCDigis')
process.simOmtfDigis.srcCSC = cms.InputTag('csctfDigis')
process.simOmtfDigis.srcDTPh = cms.InputTag('bmtfDigis')
process.simOmtfDigis.srcDTTh = cms.InputTag('bmtfDigis')
# EMTF
process.simEmtfDigis.CSCInput = cms.InputTag('emtfStage2Digis')
process.simEmtfDigis.RPCInput = cms.InputTag('muonRPCDigis')
# Calo Layer1
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag('ecalDigis:EcalTriggerPrimitives')
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('hcalDigis:')
process.L1TReEmulPath = cms.Path(process.L1TReEmul)
process.schedule.append(process.L1TReEmulPath)
return process
else:
process.simRctDigis.ecalDigis = cms.VInputTag( cms.InputTag( 'ecalDigis:EcalTriggerPrimitives' ) )
process.simRctDigis.hcalDigis = cms.VInputTag('hcalDigis:')
process.simRpcTriggerDigis.label = 'muonRPCDigis'
process.L1TReEmulPath = cms.Path(process.L1TReEmul)
process.schedule.append(process.L1TReEmulPath)
return process
def L1TReEmulFromRAW(process):
L1TReEmulFromRAW2016(process)
if stage2L1Trigger_2017.isChosen():
process.simOmtfDigis.srcRPC = cms.InputTag('omtfStage2Digis')
process.simOmtfDigis.srcCSC = cms.InputTag('omtfStage2Digis')
process.simOmtfDigis.srcDTPh = cms.InputTag('omtfStage2Digis')
process.simOmtfDigis.srcDTTh = cms.InputTag('omtfStage2Digis')
print "# L1TReEmul sequence: "
print "# {0}".format(process.L1TReEmul)
print "# {0}".format(process.schedule)
return process
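# Minimal usage sketch (assumed invocation, following the usual CMSSW convention for
# customisation functions): pass this function to cmsDriver.py via
#   --customise L1Trigger/Configuration/customiseReEmul.L1TReEmulFromRAW
# or call it directly on an existing cms.Process that already defines a schedule:
#   process = L1TReEmulFromRAW(process)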
def L1TReEmulFromRAWCalouGT(process):
L1TReEmulFromRAW(process)
process.simGtStage2Digis.MuonInputTag = cms.InputTag("gtStage2Digis","Muon")
return process
def L1TReEmulMCFromRAW(process):
L1TReEmulFromRAW(process)
if stage2L1Trigger.isChosen():
process.simEmtfDigis.CSCInput = cms.InputTag('simCscTriggerPrimitiveDigis','MPCSORTED')
process.simOmtfDigis.srcCSC = cms.InputTag('simCscTriggerPrimitiveDigis','MPCSORTED')
return process
def L1TReEmulMCFromRAWSimEcalTP(process):
L1TReEmulMCFromRAW(process)
if stage2L1Trigger.isChosen():
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag("simEcalTriggerPrimitiveDigis")
return process
def L1TReEmulMCFromRAWSimHcalTP(process):
L1TReEmulMCFromRAW(process)
if stage2L1Trigger.isChosen():
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('simHcalTriggerPrimitiveDigis')
return process
def L1TReEmulMCFrom90xRAWSimHcalTP(process):
L1TReEmulMCFromRAW(process)
if stage2L1Trigger.isChosen():
process.simHcalTriggerPrimitiveDigis.inputLabel = cms.VInputTag(
cms.InputTag('simHcalUnsuppressedDigis'),
cms.InputTag('simHcalUnsuppressedDigis')
)
process.simHcalTriggerPrimitiveDigis.inputUpgradeLabel = cms.VInputTag(
cms.InputTag('simHcalUnsuppressedDigis:HBHEQIE11DigiCollection'),
cms.InputTag('simHcalUnsuppressedDigis:HFQIE10DigiCollection')
)
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('simHcalTriggerPrimitiveDigis')
return process
#inputUpgradeLabel = cms.VInputTag(
# cms.InputTag('simHcalUnsuppressedDigis:HBHEQIE11DigiCollection'),
# cms.InputTag('simHcalUnsuppressedDigis:HFQIE10DigiCollection')),
def L1TReEmulMCFromRAWSimCalTP(process):
L1TReEmulMCFromRAW(process)
if stage2L1Trigger.isChosen():
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag("simEcalTriggerPrimitiveDigis")
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('simHcalTriggerPrimitiveDigis')
return process
def L1TReEmulFromRAWsimEcalTP(process):
L1TReEmulFromRAW(process)
if stage2L1Trigger.isChosen():
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag("simEcalTriggerPrimitiveDigis")
return process
def L1TReEmulFromRAWsimHcalTP(process):
L1TReEmulFromRAW(process)
if stage2L1Trigger.isChosen():
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('simHcalTriggerPrimitiveDigis')
return process
def L1TReEmulFromRAWsimTP(process):
L1TReEmulFromRAW(process)
if stage2L1Trigger.isChosen():
# TwinMux
process.simTwinMuxDigis.RPC_Source = cms.InputTag('muonRPCDigis')
process.simTwinMuxDigis.DTDigi_Source = cms.InputTag('simDtTriggerPrimitiveDigis')
process.simTwinMuxDigis.DTThetaDigi_Source = cms.InputTag('simDtTriggerPrimitiveDigis')
# BMTF
process.simBmtfDigis.DTDigi_Source = cms.InputTag('simTwinMuxDigis')
process.simBmtfDigis.DTDigi_Theta_Source = cms.InputTag('simDtTriggerPrimitiveDigis')
# OMTF
process.simOmtfDigis.srcRPC = cms.InputTag('muonRPCDigis')
process.simOmtfDigis.srcCSC = cms.InputTag('simCscTriggerPrimitiveDigis')
process.simOmtfDigis.srcDTPh = cms.InputTag('simDtTriggerPrimitiveDigis')
process.simOmtfDigis.srcDTTh = cms.InputTag('simDtTriggerPrimitiveDigis')
# EMTF
process.simEmtfDigis.CSCInput = cms.InputTag('simCscTriggerPrimitiveDigis')
process.simEmtfDigis.RPCInput = cms.InputTag('muonRPCDigis')
# Layer1
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag("simEcalTriggerPrimitiveDigis")
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('simHcalTriggerPrimitiveDigis')
return process
def L1TReEmulFromRAWLegacyMuon(process):
process.load('L1Trigger.Configuration.SimL1Emulator_cff')
process.load('L1Trigger.Configuration.CaloTriggerPrimitives_cff')
process.simEcalTriggerPrimitiveDigis.Label = 'ecalDigis'
process.simHcalTriggerPrimitiveDigis.inputLabel = cms.VInputTag(
cms.InputTag('hcalDigis'),
cms.InputTag('hcalDigis')
)
## - Legacy to upgrade format muon converter
process.load('L1Trigger.L1TCommon.muonLegacyInStage2FormatDigis_cfi')
process.muonLegacyInStage2FormatDigis.muonSource = cms.InputTag('simGmtDigis')
## - DT TP emulator
from L1Trigger.DTTrigger.dtTriggerPrimitiveDigis_cfi import dtTriggerPrimitiveDigis
process.simDtTriggerPrimitiveDigis = dtTriggerPrimitiveDigis.clone()
process.simDtTriggerPrimitiveDigis.digiTag = cms.InputTag('muonDTDigis')
## - TwinMux
from L1Trigger.L1TTwinMux.simTwinMuxDigis_cfi import simTwinMuxDigis
process.simTwinMuxDigisForDttf = simTwinMuxDigis.clone()
process.simTwinMuxDigisForDttf.RPC_Source = cms.InputTag('muonRPCDigis')
process.simTwinMuxDigisForDttf.DTDigi_Source = cms.InputTag('bmtfDigis')
process.simTwinMuxDigisForDttf.DTThetaDigi_Source = cms.InputTag('bmtfDigis')
## - CSC TP emulator
from L1Trigger.CSCTriggerPrimitives.cscTriggerPrimitiveDigis_cfi import cscTriggerPrimitiveDigis
process.simCscTriggerPrimitiveDigis = cscTriggerPrimitiveDigis.clone()
process.simCscTriggerPrimitiveDigis.CSCComparatorDigiProducer = cms.InputTag( 'muonCSCDigis', 'MuonCSCComparatorDigi' )
process.simCscTriggerPrimitiveDigis.CSCWireDigiProducer = cms.InputTag( 'muonCSCDigis', 'MuonCSCWireDigi' )
#
# - CSC Track Finder emulator
#
from L1Trigger.CSCTrackFinder.csctfTrackDigis_cfi import csctfTrackDigis
process.simCsctfTrackDigis = csctfTrackDigis.clone()
process.simCsctfTrackDigis.SectorReceiverInput = cms.untracked.InputTag( 'csctfDigis' )
process.simCsctfTrackDigis.DTproducer = 'simDtTriggerPrimitiveDigis'
from L1Trigger.CSCTrackFinder.csctfDigis_cfi import csctfDigis
process.simCsctfDigis = csctfDigis.clone()
process.simCsctfDigis.CSCTrackProducer = 'simCsctfTrackDigis'
##
## - DT Track Finder emulator
##
from L1Trigger.DTTrackFinder.dttfDigis_cfi import dttfDigis
process.simDttfDigis = dttfDigis.clone()
process.simDttfDigis.DTDigi_Source = 'simTwinMuxDigisForDttf'
process.simDttfDigis.CSCStub_Source = 'simCsctfTrackDigis'
##
## - RPC PAC Trigger emulator
##
from L1Trigger.RPCTrigger.rpcTriggerDigis_cff import rpcTriggerDigis
process.load('L1Trigger.RPCTrigger.RPCConeConfig_cff')
process.simRpcTriggerDigis = rpcTriggerDigis.clone()
process.simRpcTriggerDigis.label = 'muonRPCDigis'
process.simRpcTriggerDigis.RPCTriggerDebug = cms.untracked.int32(1)
##
## - Legacy Global Muon Trigger emulator
##
from L1Trigger.GlobalMuonTrigger.gmtDigis_cfi import gmtDigis
process.simGmtDigis = gmtDigis.clone()
process.simGmtDigis.DTCandidates = cms.InputTag( 'simDttfDigis', 'DT' )
process.simGmtDigis.CSCCandidates = cms.InputTag( 'simCsctfDigis', 'CSC' )
process.simGmtDigis.RPCbCandidates = cms.InputTag( 'simRpcTriggerDigis', 'RPCb' )
process.simGmtDigis.RPCfCandidates = cms.InputTag( 'simRpcTriggerDigis', 'RPCf' )
# This is for the upgrade
# BMTF
process.simBmtfDigis.DTDigi_Source = cms.InputTag('bmtfDigis')
process.simBmtfDigis.DTDigi_Theta_Source = cms.InputTag('bmtfDigis')
# TwinMux
process.simTwinMuxDigis.RPC_Source = cms.InputTag('muonRPCDigis')
process.simTwinMuxDigis.DTDigi_Source = cms.InputTag('bmtfDigis')
process.simTwinMuxDigis.DTThetaDigi_Source = cms.InputTag('bmtfDigis')
# OMTF
process.simOmtfDigis.srcRPC = cms.InputTag('muonRPCDigis')
process.simOmtfDigis.srcCSC = cms.InputTag('csctfDigis')
process.simOmtfDigis.srcDTPh = cms.InputTag('bmtfDigis')
process.simOmtfDigis.srcDTTh = cms.InputTag('bmtfDigis')
# EMTF
process.simEmtfDigis.CSCInput = cms.InputTag('emtfStage2Digis')
process.simEmtfDigis.RPCInput = cms.InputTag('muonRPCDigis')
# Calo Layer1
process.simCaloStage2Layer1Digis.ecalToken = cms.InputTag('ecalDigis:EcalTriggerPrimitives')
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('hcalDigis:')
# - Sequences
process.L1TMuonTriggerPrimitives = cms.Sequence(process.simCscTriggerPrimitiveDigis + process.simDtTriggerPrimitiveDigis + process.simTwinMuxDigisForDttf)
process.L1TReEmul = cms.Sequence(process.L1TMuonTriggerPrimitives + process.simCsctfTrackDigis + process.simCsctfDigis + process.simDttfDigis + process.simRpcTriggerDigis + process.simGmtDigis + process.muonLegacyInStage2FormatDigis)
process.load('L1Trigger.L1TMuon.simMuonQualityAdjusterDigis_cfi')
process.L1TReEmul = cms.Sequence( process.L1TReEmul + process.simTwinMuxDigis + process.simBmtfDigis + process.simEmtfDigis + process.simOmtfDigis + process.simGmtCaloSumDigis + process.simMuonQualityAdjusterDigis + process.simGmtStage2Digis)
process.L1TReEmul = cms.Sequence( process.L1TReEmul + process.SimL1TechnicalTriggers + process.SimL1TGlobal )
process.L1TReEmulPath = cms.Path(process.L1TReEmul)
process.schedule.append(process.L1TReEmulPath)
print "# L1TReEmul sequence: "
print "# {0}".format(process.L1TReEmul)
print "# {0}".format(process.schedule)
return process
| 52.863388 | 246 | 0.733151 | [
"Apache-2.0"
] | nistefan/cmssw | L1Trigger/Configuration/python/customiseReEmul.py | 19,348 | Python |
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
from os import path as osp
from typing import List, Text
import torch
__all__ = ['change_key', 'get_cell_based_tiny_net', 'get_search_spaces', 'get_cifar_models', 'get_imagenet_models', \
'obtain_model', 'obtain_search_model', 'load_net_from_checkpoint', \
'CellStructure', 'CellArchitectures'
]
# useful modules
from config_utils import dict2config
from models.SharedUtils import change_key
from models.cell_searchs import CellStructure, CellArchitectures
# Cell-based NAS Models
def get_cell_based_tiny_net(config):
if isinstance(config, dict): config = dict2config(config, None) # to support the argument being a dict
super_type = getattr(config, 'super_type', 'basic')
group_names = ['DARTS-V1', 'DARTS-V2', 'GDAS', 'SETN', 'ENAS', 'RANDOM', 'generic']
if super_type == 'basic' and config.name in group_names:
from .cell_searchs import nas201_super_nets as nas_super_nets
try:
return nas_super_nets[config.name](config.C, config.N, config.max_nodes, config.num_classes, config.space, config.affine, config.track_running_stats)
except: # older configs may lack affine/track_running_stats; fall back to the short signature
return nas_super_nets[config.name](config.C, config.N, config.max_nodes, config.num_classes, config.space)
elif super_type == 'search-shape':
from .shape_searchs import GenericNAS301Model
genotype = CellStructure.str2structure(config.genotype)
return GenericNAS301Model(config.candidate_Cs, config.max_num_Cs, genotype, config.num_classes, config.affine, config.track_running_stats)
elif super_type == 'nasnet-super':
from .cell_searchs import nasnet_super_nets as nas_super_nets
return nas_super_nets[config.name](config.C, config.N, config.steps, config.multiplier, \
config.stem_multiplier, config.num_classes, config.space, config.affine, config.track_running_stats)
elif config.name == 'infer.tiny':
from .cell_infers import TinyNetwork
if hasattr(config, 'genotype'):
genotype = config.genotype
elif hasattr(config, 'arch_str'):
genotype = CellStructure.str2structure(config.arch_str)
else: raise ValueError('Can not find genotype from this config : {:}'.format(config))
return TinyNetwork(config.C, config.N, genotype, config.num_classes)
elif config.name == 'infer.shape.tiny':
from .shape_infers import DynamicShapeTinyNet
if isinstance(config.channels, str):
channels = tuple([int(x) for x in config.channels.split(':')])
else: channels = config.channels
genotype = CellStructure.str2structure(config.genotype)
return DynamicShapeTinyNet(channels, genotype, config.num_classes)
elif config.name == 'infer.nasnet-cifar':
from .cell_infers import NASNetonCIFAR
raise NotImplementedError
else:
raise ValueError('invalid network name : {:}'.format(config.name))
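# Hypothetical usage sketch for get_cell_based_tiny_net (the concrete values below are
# illustrative assumptions, not canonical settings from this repository):
#   cfg = {'name': 'infer.tiny', 'C': 16, 'N': 5, 'num_classes': 10,
#          'arch_str': '<a NAS-Bench-201 style architecture string>'}
#   net = get_cell_based_tiny_net(cfg)  # plain dicts are accepted via dict2config above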
# obtain the search space, i.e., a list of operation names (topology space) or a dict describing the candidate channel sizes (size space)
def get_search_spaces(xtype, name) -> List[Text]:
if xtype == 'cell' or xtype == 'tss': # The topology search space.
from .cell_operations import SearchSpaceNames
assert name in SearchSpaceNames, 'invalid name [{:}] in {:}'.format(name, SearchSpaceNames.keys())
return SearchSpaceNames[name]
elif xtype == 'sss': # The size search space.
if name == 'nas-bench-301' or name == 'nats-bench' or name == 'nats-bench-size':
return {'candidates': [8, 16, 24, 32, 40, 48, 56, 64],
'numbers': 5}
else:
raise ValueError('Invalid name : {:}'.format(name))
else:
raise ValueError('invalid search-space type is {:}'.format(xtype))
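# Hypothetical usage sketch (grounded in the branches above): the topology space ('tss')
# returns a list of operation names, while the size space ('sss') returns a dict of
# candidate channel counts.
#   sss = get_search_spaces('sss', 'nats-bench')
#   # -> {'candidates': [8, 16, 24, 32, 40, 48, 56, 64], 'numbers': 5}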
def get_cifar_models(config, extra_path=None):
super_type = getattr(config, 'super_type', 'basic')
if super_type == 'basic':
from .CifarResNet import CifarResNet
from .CifarDenseNet import DenseNet
from .CifarWideResNet import CifarWideResNet
if config.arch == 'resnet':
return CifarResNet(config.module, config.depth, config.class_num, config.zero_init_residual)
elif config.arch == 'densenet':
return DenseNet(config.growthRate, config.depth, config.reduction, config.class_num, config.bottleneck)
elif config.arch == 'wideresnet':
return CifarWideResNet(config.depth, config.wide_factor, config.class_num, config.dropout)
else:
raise ValueError('invalid module type : {:}'.format(config.arch))
elif super_type.startswith('infer'):
from .shape_infers import InferWidthCifarResNet
from .shape_infers import InferDepthCifarResNet
from .shape_infers import InferCifarResNet
from .cell_infers import NASNetonCIFAR
assert len(super_type.split('-')) == 2, 'invalid super_type : {:}'.format(super_type)
infer_mode = super_type.split('-')[1]
if infer_mode == 'width':
return InferWidthCifarResNet(config.module, config.depth, config.xchannels, config.class_num, config.zero_init_residual)
elif infer_mode == 'depth':
return InferDepthCifarResNet(config.module, config.depth, config.xblocks, config.class_num, config.zero_init_residual)
elif infer_mode == 'shape':
return InferCifarResNet(config.module, config.depth, config.xblocks, config.xchannels, config.class_num, config.zero_init_residual)
elif infer_mode == 'nasnet.cifar':
genotype = config.genotype
if extra_path is not None: # reload genotype by extra_path
if not osp.isfile(extra_path): raise ValueError('invalid extra_path : {:}'.format(extra_path))
xdata = torch.load(extra_path)
current_epoch = xdata['epoch']
genotype = xdata['genotypes'][current_epoch-1]
C = config.C if hasattr(config, 'C') else config.ichannel
N = config.N if hasattr(config, 'N') else config.layers
return NASNetonCIFAR(C, N, config.stem_multi, config.class_num, genotype, config.auxiliary)
else:
raise ValueError('invalid infer-mode : {:}'.format(infer_mode))
else:
raise ValueError('invalid super-type : {:}'.format(super_type))
def get_imagenet_models(config):
super_type = getattr(config, 'super_type', 'basic')
if super_type == 'basic':
from .ImageNet_ResNet import ResNet
from .ImageNet_MobileNetV2 import MobileNetV2
if config.arch == 'resnet':
return ResNet(config.block_name, config.layers, config.deep_stem, config.class_num, config.zero_init_residual, config.groups, config.width_per_group)
elif config.arch == 'mobilenet_v2':
return MobileNetV2(config.class_num, config.width_multi, config.input_channel, config.last_channel, 'InvertedResidual', config.dropout)
else:
raise ValueError('invalid arch : {:}'.format( config.arch ))
elif super_type.startswith('infer'): # NAS searched architecture
assert len(super_type.split('-')) == 2, 'invalid super_type : {:}'.format(super_type)
infer_mode = super_type.split('-')[1]
if infer_mode == 'shape':
from .shape_infers import InferImagenetResNet
from .shape_infers import InferMobileNetV2
if config.arch == 'resnet':
return InferImagenetResNet(config.block_name, config.layers, config.xblocks, config.xchannels, config.deep_stem, config.class_num, config.zero_init_residual)
elif config.arch == "MobileNetV2":
return InferMobileNetV2(config.class_num, config.xchannels, config.xblocks, config.dropout)
else:
raise ValueError('invalid arch-mode : {:}'.format(config.arch))
else:
raise ValueError('invalid infer-mode : {:}'.format(infer_mode))
else:
raise ValueError('invalid super-type : {:}'.format(super_type))
# Try to obtain the network by config.
def obtain_model(config, extra_path=None):
if config.dataset == 'cifar':
return get_cifar_models(config, extra_path)
elif config.dataset == 'imagenet':
return get_imagenet_models(config)
else:
raise ValueError('invalid dataset in the model config : {:}'.format(config))
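# Hypothetical usage sketch for obtain_model (the field names follow the 'resnet' branch of
# get_cifar_models above; the concrete values, including the block name, are assumptions):
#   cfg = dict2config({'dataset': 'cifar', 'super_type': 'basic', 'arch': 'resnet',
#                      'module': 'ResNetBasicblock', 'depth': 20, 'class_num': 10,
#                      'zero_init_residual': False}, None)
#   net = obtain_model(cfg)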
def obtain_search_model(config):
if config.dataset == 'cifar':
if config.arch == 'resnet':
from .shape_searchs import SearchWidthCifarResNet
from .shape_searchs import SearchDepthCifarResNet
from .shape_searchs import SearchShapeCifarResNet
if config.search_mode == 'width':
return SearchWidthCifarResNet(config.module, config.depth, config.class_num)
elif config.search_mode == 'depth':
return SearchDepthCifarResNet(config.module, config.depth, config.class_num)
elif config.search_mode == 'shape':
return SearchShapeCifarResNet(config.module, config.depth, config.class_num)
else: raise ValueError('invalid search mode : {:}'.format(config.search_mode))
elif config.arch == 'simres':
from .shape_searchs import SearchWidthSimResNet
if config.search_mode == 'width':
return SearchWidthSimResNet(config.depth, config.class_num)
else: raise ValueError('invalid search mode : {:}'.format(config.search_mode))
else:
raise ValueError('invalid arch : {:} for dataset [{:}]'.format(config.arch, config.dataset))
elif config.dataset == 'imagenet':
from .shape_searchs import SearchShapeImagenetResNet
assert config.search_mode == 'shape', 'invalid search-mode : {:}'.format( config.search_mode )
if config.arch == 'resnet':
return SearchShapeImagenetResNet(config.block_name, config.layers, config.deep_stem, config.class_num)
else:
raise ValueError('invalid model config : {:}'.format(config))
else:
raise ValueError('invalid dataset in the model config : {:}'.format(config))
def load_net_from_checkpoint(checkpoint):
assert osp.isfile(checkpoint), 'checkpoint {:} does not exist'.format(checkpoint)
checkpoint = torch.load(checkpoint)
model_config = dict2config(checkpoint['model-config'], None)
model = obtain_model(model_config)
model.load_state_dict(checkpoint['base-model'])
return model
| 50.790816 | 165 | 0.714917 | [
"MIT"
] | hyunghunny/AutoDL-Projects | nas201bench/models/__init__.py | 9,955 | Python |
# Tai Sakuma <[email protected]>
from __future__ import print_function
import os
import errno
import logging
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl import mkdir_p
##__________________________________________________________________||
@pytest.fixture()
def mock_makedirs(monkeypatch):
ret = mock.Mock()
monkeypatch.setattr(os, 'makedirs', ret)
return ret
@pytest.fixture()
def mock_isdir(monkeypatch):
ret = mock.Mock()
monkeypatch.setattr(os.path, 'isdir', ret)
return ret
##__________________________________________________________________||
def test_emtpy(mock_makedirs):
mkdir_p('')
assert [ ] == mock_makedirs.call_args_list
def test_success(mock_makedirs):
mkdir_p('a/b')
assert [mock.call('a/b')] == mock_makedirs.call_args_list
def test_already_exist(mock_makedirs, mock_isdir, caplog):
mock_isdir.return_value = True
mock_makedirs.side_effect = OSError(errno.EEXIST, 'already exist')
with caplog.at_level(logging.DEBUG - 1):
mkdir_p('a/b')
assert [mock.call('a/b')] == mock_makedirs.call_args_list
assert len(caplog.records) == 1
assert caplog.records[0].levelno == logging.DEBUG - 1
assert 'tried' in caplog.records[0].msg
def test_raise(mock_makedirs, mock_isdir, caplog):
mock_isdir.return_value = False
mock_makedirs.side_effect = OSError
with pytest.raises(OSError):
mkdir_p('a/b')
assert [mock.call('a/b')] == mock_makedirs.call_args_list
##__________________________________________________________________||
| 28.140351 | 70 | 0.741895 | [
"BSD-3-Clause"
] | shane-breeze/AlphaTwirl | tests/unit/misc/test_mkdir_p.py | 1,604 | Python |
import os, sys, re
import importlib
try:
from PySide.QtGui import *
from PySide.QtCore import *
except: # newer 3ds Max builds ship PySide2 instead of PySide
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from multi_script_editor import scriptEditor
importlib.reload(scriptEditor)
import MaxPlus
q3dsmax = QApplication.instance()
class MaxDialogEvents(QObject):
def eventFilter(self, obj, event):
import MaxPlus
if event.type() == QEvent.WindowActivate:
MaxPlus.CUI.DisableAccelerators()
elif event.type() == QEvent.WindowDeactivate:
MaxPlus.CUI.EnableAccelerators()
return False
def show():
try:
qtwindow = MaxPlus.GetQMaxWindow()
except: # fall back for MaxPlus versions that expose GetQMaxMainWindow instead
qtwindow = MaxPlus.GetQMaxMainWindow()
se = scriptEditor.scriptEditorClass(parent=qtwindow)
#se.installEventFilter(MaxDialogEvents())
se.runCommand('import MaxPlus')
#se.MaxEventFilter = MaxDialogEvents()
#se.installEventFilter(se.MaxEventFilter)
se.show()
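# Hypothetical usage sketch from the 3ds Max Python listener (the import path is an
# assumption based on this file living under pw_multiScriptEditor/managers/_3dsmax.py):
#   from pw_multiScriptEditor.managers import _3dsmax
#   _3dsmax.show()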
| 27.486486 | 56 | 0.701082 | [
"MIT"
] | ReimuSG/NukeToolSet | python/pw_multiScriptEditor/managers/_3dsmax.py | 1,017 | Python |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
from __future__ import division
import abc
import contextlib
import itertools
import threading
import unittest
from concurrent import futures
import six
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
raise ValueError(
'Test callback passed unexpected values: %s' %
(self._passed_other_stuff,))
elif self._called:
return self._passed_future
else:
self._condition.wait()
class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.TestCase)):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
self._digest = _digest.digest(
_stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.event_method_implementations, None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result() for response_future in response_futures]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
inner_response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
outer_response_future = pool.submit(inner_response_future.result)
requests.append(request)
response_futures_to_indices[outer_response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index], response_future.result(), self)
pool.shutdown(wait=True)
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(
group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
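# Hypothetical sketch of a concrete subclass (the names below are assumptions; the only
# contract, per the class docstring above, is supplying an `implementation` attribute of type
# test_interfaces.Implementation and an `invoker_constructor` attribute of type
# _invocation.InvokerConstructor):
#   class MyFutureInvocationTest(TestCase):
#       implementation = my_implementation_module.Implementation()
#       invoker_constructor = my_invoker_constructor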
| 42.324324 | 97 | 0.721485 | [
"BSD-3-Clause"
] | DiracResearch/grpc | src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py | 20,358 | Python |
"""
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against each individual x_i and the normalized
values of the univariate F-test statistics and of the mutual information.
As the F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables, and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| 32.94 | 97 | 0.676381 | [
"BSD-3-Clause"
] | NickVeld/scikit-learn-proj | examples/feature_selection/plot_f_test_vs_mi.py | 1,647 | Python |
"""
Tests utils for tagging.
"""
from django.template import Origin
from django.template.loaders.base import Loader
class VoidLoader(Loader):
"""
Template loader which is always returning
an empty template.
"""
is_usable = True
_accepts_engine_in_init = True
def get_template_sources(self, template_name):
yield Origin(
name='voidloader',
template_name=template_name,
loader=self)
def get_contents(self, origin):
return ''
def load_template_source(self, template_name, template_dirs=None):
return ('', 'voidloader:%s' % template_name)
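# Hypothetical usage sketch: wiring the loader into Django's TEMPLATES setting for tests
# (the dotted path assumes this module is importable as tagging.tests.utils):
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'OPTIONS': {'loaders': ['tagging.tests.utils.VoidLoader']},
#   }]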
| 23.555556 | 70 | 0.663522 | [
"BSD-3-Clause"
] | Fantomas42/django-tagging | tagging/tests/utils.py | 636 | Python |
from imdbclassifier.train_nn import KTrain
from imdbclassifier.parser_utils_nn import KParseArgs
from time import time
import sys
import os
# Hide warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if __name__ == '__main__':
parser = KParseArgs()
args = parser.parse_args()
start_time = time()
flag = len(sys.argv) == 1
if flag:
print("Using Default Baseline parameters")
else:
print("Using Experimental parameters")
print("hidden_layers:", args.hidden_layers)
print("output:", args.output)
print("epochs:", args.epochs)
print("loss:", args.loss)
print("experiment_name:", args.experiment_name)
train_models_cls = KTrain().train_models(args, flag)
timed = time() - start_time
print("This model took", timed, " seconds to train and test.") | 24.323529 | 66 | 0.688029 | [
"MIT"
] | mmm84766/ai-platform | tasks/natural-language-processing/sentiment-analysis/keras/imdbclassifier/main_nn.py | 827 | Python |
"""Unit tests for Tangent PCA."""
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.spd_matrices import SPDMatrices, SPDMetricAffine
from geomstats.geometry.special_euclidean import SpecialEuclidean
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
from geomstats.learning.exponential_barycenter import ExponentialBarycenter
from geomstats.learning.pca import TangentPCA
class TestTangentPCA(geomstats.tests.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.so3 = SpecialOrthogonal(n=3, point_type='vector')
self.spd = SPDMatrices(3)
self.spd_metric = SPDMetricAffine(3)
self.n_samples = 10
self.X = self.so3.random_uniform(n_samples=self.n_samples)
self.metric = self.so3.bi_invariant_metric
self.n_components = 2
@geomstats.tests.np_and_autograd_only
def test_tangent_pca_error(self):
X = self.X
tpca = TangentPCA(self.metric, n_components=self.n_components)
tpca.fit(X)
X_diff_size = gs.ones((self.n_samples, gs.shape(X)[1] + 1))
self.assertRaises(ValueError, tpca.transform, X_diff_size)
@geomstats.tests.np_and_autograd_only
def test_tangent_pca(self):
X = self.X
tpca = TangentPCA(self.metric, n_components=gs.shape(X)[1])
tpca.fit(X)
self.assertEqual(tpca.n_features_, gs.shape(X)[1])
@geomstats.tests.np_and_autograd_only
def test_fit_mle(self):
X = self.X
tpca = TangentPCA(self.metric, n_components='mle')
tpca.fit(X)
self.assertEqual(tpca.n_features_, gs.shape(X)[1])
@geomstats.tests.np_and_autograd_only
def test_fit_to_target_explained_variance(self):
X = self.spd.random_point(n_samples=5)
target = 0.90
tpca = TangentPCA(
self.spd_metric, n_components=target)
tpca.fit(X)
result = gs.cumsum(tpca.explained_variance_ratio_)[-1] > target
expected = True
self.assertAllClose(result, expected)
@geomstats.tests.np_and_autograd_only
def test_fit_matrix(self):
expected = 2
X = self.spd.random_point(n_samples=5)
tpca = TangentPCA(
metric=self.spd_metric, n_components=expected)
tpca.fit(X)
result = tpca.n_components_
self.assertAllClose(result, expected)
@geomstats.tests.np_and_autograd_only
def test_fit_transform_matrix(self):
expected = 2
X = self.spd.random_point(n_samples=5)
tpca = TangentPCA(
metric=self.spd_metric, n_components=expected)
tangent_projected_data = tpca.fit_transform(X)
result = tangent_projected_data.shape[-1]
self.assertAllClose(result, expected)
@geomstats.tests.np_and_autograd_only
def test_fit_inverse_transform_matrix(self):
X = self.spd.random_point(n_samples=5)
tpca = TangentPCA(
metric=self.spd_metric)
tangent_projected_data = tpca.fit_transform(X)
result = tpca.inverse_transform(tangent_projected_data)
expected = X
self.assertAllClose(result, expected, atol=1e-6)
@geomstats.tests.np_and_autograd_only
def test_fit_transform_vector(self):
expected = 2
tpca = TangentPCA(
metric=self.metric, n_components=expected)
tangent_projected_data = tpca.fit_transform(self.X)
result = tangent_projected_data.shape[-1]
self.assertAllClose(result, expected)
@geomstats.tests.np_and_autograd_only
def test_fit_inverse_transform_vector(self):
tpca = TangentPCA(metric=self.metric)
tangent_projected_data = tpca.fit_transform(self.X)
result = tpca.inverse_transform(tangent_projected_data)
expected = self.X
self.assertAllClose(result, expected)
@geomstats.tests.np_and_autograd_only
def test_fit_fit_transform_matrix(self):
X = self.spd.random_point(n_samples=5)
tpca = TangentPCA(
metric=self.spd_metric)
expected = tpca.fit_transform(X)
result = tpca.fit(X).transform(X)
self.assertAllClose(result, expected)
@geomstats.tests.np_and_autograd_only
def test_fit_matrix_se(self):
se_mat = SpecialEuclidean(n=3)
X = se_mat.random_point(self.n_samples)
estimator = ExponentialBarycenter(se_mat)
estimator.fit(X)
mean = estimator.estimate_
tpca = TangentPCA(metric=se_mat)
tangent_projected_data = tpca.fit_transform(X, base_point=mean)
result = tpca.inverse_transform(tangent_projected_data)
expected = X
self.assertAllClose(result, expected)
| 36.889764 | 75 | 0.691142 | [
"MIT"
] | SwastiKh/geomstats | tests/tests_geomstats/test_pca.py | 4,685 | Python |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import logging
import os
import re
import selectors
import threading
import time
from contextlib import closing
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import FingerprintedProcessManager, ProcessGroup
from pants.util.collections import ensure_str_list
from pants.util.dirutil import read_file, safe_file_dump, safe_open
from pants.util.memo import memoized_classproperty
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
_NAILGUN_KILL_LOCK = threading.Lock()
def __init__(self, metadata_base_dir=None):
super().__init__(name="nailgun", metadata_base_dir=metadata_base_dir)
# TODO: this should enumerate the .pids dir first, then fallback to ps enumeration (& warn).
def _iter_nailgun_instances(self, everywhere=False):
def predicate(proc):
if proc.name() == NailgunExecutor._PROCESS_NAME:
if not everywhere:
return NailgunExecutor._PANTS_NG_BUILDROOT_ARG in proc.cmdline()
else:
return any(
arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX)
for arg in proc.cmdline()
)
return self.iter_instances(predicate)
def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info("killing nailgun server pid={pid}".format(pid=proc.pid))
proc.terminate()
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor, FingerprintedProcessManager):
"""Executes java programs by launching them in nailgun server.
If a nailgun is not available for a given set of jvm args and classpath, one is launched and re-
used for the given jvm args and classpath on subsequent runs.
"""
# 'NGServer 0.9.1 started on 127.0.0.1, port 53785.'
_NG_PORT_REGEX = re.compile(r".*\s+port\s+(\d+)\.$")
# Used to identify if we own a given nailgun server.
FINGERPRINT_CMD_KEY = "-Dpants.nailgun.fingerprint"
_PANTS_NG_ARG_PREFIX = "-Dpants.buildroot"
_PANTS_OWNER_ARG_PREFIX = "-Dpants.nailgun.owner"
@memoized_classproperty
def _PANTS_NG_BUILDROOT_ARG(cls):
return "=".join((cls._PANTS_NG_ARG_PREFIX, get_buildroot()))
_NAILGUN_SPAWN_LOCK = threading.Lock()
_PROCESS_NAME = "java"
def __init__(
self,
identity,
workdir,
nailgun_classpath,
distribution,
startup_timeout=10,
connect_timeout=10,
connect_attempts=5,
metadata_base_dir=None,
):
Executor.__init__(self, distribution=distribution)
FingerprintedProcessManager.__init__(
self,
name=identity,
process_name=self._PROCESS_NAME,
metadata_base_dir=metadata_base_dir,
)
if not isinstance(workdir, str):
raise ValueError(
"Workdir must be a path string, not: {workdir}".format(workdir=workdir)
)
self._identity = identity
self._workdir = workdir
self._ng_stdout = os.path.join(workdir, "stdout")
self._ng_stderr = os.path.join(workdir, "stderr")
self._nailgun_classpath = ensure_str_list(nailgun_classpath)
self._startup_timeout = startup_timeout
self._connect_timeout = connect_timeout
self._connect_attempts = connect_attempts
def __str__(self):
return "NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})".format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket
)
def _create_owner_arg(self, workdir):
# Currently the owner is identified via the full path to the workdir.
return "=".join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return "=".join((self.FINGERPRINT_CMD_KEY, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
encoded_jvm_options = [option.encode() for option in sorted(jvm_options)]
encoded_classpath = [cp.encode() for cp in sorted(classpath)]
encoded_java_version = repr(java_version).encode()
for item in (encoded_jvm_options, encoded_classpath, encoded_java_version):
digest.update(str(item).encode())
return digest.hexdigest()
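# Illustrative note (the hex digest below is made up): the fingerprint computed above is
# surfaced on the server command line via FINGERPRINT_CMD_KEY, e.g.
#   -Dpants.nailgun.fingerprint=3f786850e387550fdab836ed7e6dc881de23001b
# which is how a later pants run recognises a nailgun started with compatible jvm options,
# classpath and JVM version.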
def _runner(self, classpath, main, jvm_options, args):
"""Runner factory.
Called via Executor.execute().
"""
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, stdin=None, cwd=None):
nailgun = None
try:
nailgun = self._get_nailgun_client(
jvm_options, classpath, stdout, stderr, stdin
)
logger.debug(
"Executing via {ng_desc}: {cmd}".format(ng_desc=nailgun, cmd=this.cmd)
)
return nailgun.execute(main, cwd, *args)
except (NailgunClient.NailgunError, self.InitialNailgunConnectTimedOut) as e:
self.terminate()
raise self.Error(
"Problem launching via {ng_desc} command {main} {args}: {msg}".format(
ng_desc=nailgun or "<no nailgun connection>",
main=main,
args=" ".join(args),
msg=e,
)
)
return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = self.needs_restart(new_fingerprint)
logger.debug(
"Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} "
"new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}".format(
nailgun=self._identity,
up=updated,
run=running,
old_fp=self.fingerprint,
new_fp=new_fingerprint,
old_dist=self.cmd,
new_dist=self._distribution.java,
)
)
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner.
It handles creation of the running nailgun server as well as creation of the client.
"""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug(
"Found running nailgun server that needs updating, killing {server}".format(
server=self._identity
)
)
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(
new_fingerprint, jvm_options, classpath, stdout, stderr, stdin
)
return self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
class InitialNailgunConnectTimedOut(Exception):
_msg_fmt = """Failed to read nailgun output after {timeout} seconds!
Stdout:
{stdout}
Stderr:
{stderr}"""
def __init__(self, timeout, stdout, stderr):
msg = self._msg_fmt.format(timeout=timeout, stdout=stdout, stderr=stderr)
super(NailgunExecutor.InitialNailgunConnectTimedOut, self).__init__(msg)
def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun
stdout."""
start_time = time.time()
accumulated_stdout = ""
def calculate_remaining_time():
return time.time() - (start_time + timeout)
def possibly_raise_timeout(remaining_time):
if remaining_time > 0:
stderr = read_file(self._ng_stderr, binary_mode=True)
raise self.InitialNailgunConnectTimedOut(
timeout=timeout, stdout=accumulated_stdout, stderr=stderr,
)
# NB: We use PollSelector, rather than the more efficient DefaultSelector, because
# DefaultSelector results in using the epoll() syscall on Linux, which does not work with
# regular text files like ng_stdout. See https://stackoverflow.com/a/8645770.
with selectors.PollSelector() as selector, safe_open(self._ng_stdout, "r") as ng_stdout:
selector.register(ng_stdout, selectors.EVENT_READ)
while 1:
remaining_time = calculate_remaining_time()
possibly_raise_timeout(remaining_time)
events = selector.select(timeout=-1 * remaining_time)
if events:
line = ng_stdout.readline() # TODO: address deadlock risk here.
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
accumulated_stdout += line
def _create_ngclient(self, port, stdout, stderr, stdin):
return NailgunClient(port=port, ins=stdin, out=stdout, err=stderr)
def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug(
"Verified new ng server is connectable at {}".format(sock.getpeername())
)
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug(
"Failed to connect to ng after {} attempts".format(self._connect_attempts)
)
raise # Re-raise the NailgunConnectionError which provides more context to the user.
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
"""Synchronously spawn a new nailgun server."""
        # Truncate the nailgun's stdout & stderr.
safe_file_dump(self._ng_stdout, b"", mode="wb")
safe_file_dump(self._ng_stderr, b"", mode="wb")
jvm_options = jvm_options + [
self._PANTS_NG_BUILDROOT_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint),
]
post_fork_child_opts = dict(
fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr,
)
logger.debug(
"Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}".format(
i=self._identity, f=fingerprint, j=jvm_options, cp=classpath
)
)
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
# Wait for and write the port information in the parent so we can bail on exception/timeout.
self.await_pid(self._startup_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug(
"Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}".format(
i=self._identity, f=fingerprint, pid=self.pid, port=self.socket
)
)
client = self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
self.ensure_connectable(client)
return client
def _check_process_buildroot(self, process):
"""Matches only processes started from the current buildroot."""
return self._PANTS_NG_BUILDROOT_ARG in process.cmdline()
def is_alive(self):
"""A ProcessManager.is_alive() override that ensures buildroot flags are present in the
process command line arguments."""
return super().is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(
classpath=classpath,
main="com.martiansoftware.nailgun.NGServer",
jvm_options=jvm_options,
args=[":0"],
stdin=safe_open("/dev/null", "r"),
stdout=safe_open(self._ng_stdout, "w"),
stderr=safe_open(self._ng_stderr, "w"),
close_fds=True,
)
self.write_pid(subproc.pid)
| 40.470914 | 105 | 0.624298 | ["Apache-2.0"] | revl/pants | src/python/pants/java/nailgun_executor.py | 14,610 | Python
import abc
import configparser
import json
import os
from typing import Any, Dict, Optional
from pystratum_backend.RoutineWrapperGeneratorWorker import RoutineWrapperGeneratorWorker
from pystratum_backend.StratumStyle import StratumStyle
from pystratum_common.Util import Util
class CommonRoutineWrapperGeneratorWorker(RoutineWrapperGeneratorWorker):
"""
    Class for generating a class with wrapper methods for calling stored routines in a database.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, io: StratumStyle, config: configparser.ConfigParser):
"""
Object constructor.
        :param StratumStyle io: The output decorator.
        :param configparser.ConfigParser config: The configuration object.
"""
self._code: str = ''
"""
The generated Python code buffer.
"""
self._lob_as_string_flag: bool = False
"""
If true BLOBs and CLOBs must be treated as strings.
"""
self._metadata_filename: Optional[str] = None
"""
The filename of the file with the metadata of all stored procedures.
"""
self._parent_class_name: Optional[str] = None
"""
The class name of the parent class of the routine wrapper.
"""
self._parent_class_namespace: Optional[str] = None
"""
The namespace of the parent class of the routine wrapper.
"""
self._wrapper_class_name: Optional[str] = None
"""
The class name of the routine wrapper.
"""
self._wrapper_filename: Optional[str] = None
"""
The filename where the generated wrapper class must be stored.
"""
self._io: StratumStyle = io
"""
The output decorator.
"""
self._config = config
"""
The configuration object.
:type: ConfigParser
"""
# ------------------------------------------------------------------------------------------------------------------
def execute(self) -> int:
"""
The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred.
:rtype: int
"""
self._read_configuration_file()
if self._wrapper_class_name:
self._io.title('Wrapper')
self.__generate_wrapper_class()
self._io.writeln('')
else:
self._io.log_verbose('Wrapper not enabled')
return 0
# ------------------------------------------------------------------------------------------------------------------
def __generate_wrapper_class(self) -> None:
"""
Generates the wrapper class.
"""
routines = self._read_routine_metadata()
self._write_class_header()
if routines:
for routine_name in sorted(routines):
if routines[routine_name]['designation'] != 'hidden':
self._write_routine_function(routines[routine_name])
else:
self._io.error('No files with stored routines found')
self._write_class_trailer()
Util.write_two_phases(self._wrapper_filename, self._code, self._io)
# ------------------------------------------------------------------------------------------------------------------
def _read_configuration_file(self) -> None:
"""
Reads parameters from the configuration file.
"""
self._parent_class_name = self._config.get('wrapper', 'parent_class')
self._parent_class_namespace = self._config.get('wrapper', 'parent_class_namespace')
self._wrapper_class_name = self._config.get('wrapper', 'wrapper_class')
self._wrapper_filename = self._config.get('wrapper', 'wrapper_file')
self._metadata_filename = self._config.get('wrapper', 'metadata')
self._lob_as_string_flag = bool(self._config.get('wrapper', 'lob_as_string'))
# ------------------------------------------------------------------------------------------------------------------
def _read_routine_metadata(self) -> Dict:
"""
Returns the metadata of stored routines.
:rtype: dict
"""
metadata = {}
if os.path.isfile(self._metadata_filename):
with open(self._metadata_filename, 'r') as file:
metadata = json.load(file)
return metadata
# ------------------------------------------------------------------------------------------------------------------
def _write_class_header(self) -> None:
"""
Generate a class header for stored routine wrapper.
"""
self._write_line('from typing import Any, Dict, List, Optional, Union')
self._write_line()
self._write_line('from {0!s} import {1!s}'.format(self._parent_class_namespace, self._parent_class_name))
self._write_line()
self._write_line()
self._write_line('class {0!s}({1!s}):'.format(self._wrapper_class_name, self._parent_class_name))
self._write_line(' """')
self._write_line(' The stored routines wrappers.')
self._write_line(' """')
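        # For illustration only (config values are hypothetical): with parent_class_namespace set to
        # 'my_app.data_layer', parent_class set to 'DataLayer' and wrapper_class set to
        # 'MyRoutineWrapper', the generated header would look like:
        #
        #   from typing import Any, Dict, List, Optional, Union
        #
        #   from my_app.data_layer import DataLayer
        #
        #
        #   class MyRoutineWrapper(DataLayer):
        #       """
        #       The stored routines wrappers.
        #       """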
# ------------------------------------------------------------------------------------------------------------------
def _write_line(self, text: str = '') -> None:
"""
Writes a line with Python code to the generate code buffer.
:param str text: The line with Python code.
"""
if text:
self._code += str(text) + "\n"
else:
self._code += "\n"
# ------------------------------------------------------------------------------------------------------------------
def _write_class_trailer(self) -> None:
"""
Generate a class trailer for stored routine wrapper.
"""
self._write_line()
self._write_line()
self._write_line('# ' + ('-' * 118))
# ------------------------------------------------------------------------------------------------------------------
@abc.abstractmethod
def _write_routine_function(self, routine: Dict[str, Any]) -> None:
"""
Generates a complete wrapper method for a stored routine.
:param dict routine: The metadata of the stored routine.
"""
raise NotImplementedError()
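        # A concrete backend subclass is expected to override this method. A rough, hypothetical
        # sketch (metadata keys other than 'designation' and the execute_sp helper are assumptions):
        #
        #   def _write_routine_function(self, routine: Dict[str, Any]) -> None:
        #       self._write_line()
        #       self._write_line('    # ' + ('-' * 114))
        #       self._write_line('    def {}(self, *args):'.format(routine['routine_name']))
        #       self._write_line('        """Calls stored routine {}."""'.format(routine['routine_name']))
        #       self._write_line('        return self.execute_sp(..., *args)')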
# ----------------------------------------------------------------------------------------------------------------------
| 35.451087 | 120 | 0.496397 | ["MIT"] | DatabaseStratum/py-stratum-common | pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py | 6,523 | Python
import sys
import psycopg2
import os
# Parameter initialization
database = os.environ['DATABASE_URL']
# Creates a Postgres database to store information, if it does not exist yet.
def carregar_bd():
    # Connect to the database at the specified URL
connection = psycopg2.connect(database)
    # Create a database cursor, an iterator that allows navigating
    # and manipulating the db records, and assign it to a variable.
cursor = connection.cursor()
    # Load the commands from the sql script
script = open('create.sql', 'r').read()
    # Execute the SQL script commands directly on the database.
cursor.execute(script)
    # Save the changes.
connection.commit()
    # Close the connection.
connection.close()
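# Illustrative call sequence (not executed here; the IDs and names are hypothetical):
#   carregar_bd()                          # create the tables from create.sql if needed
#   cria_grupo(-1001234, 42)               # register a group and its game master
#   cria_ficha(-1001234, 42, nome='Exemplo', nivel=10)
#   add_feito(-1001234, 42, 'Ataque Poderoso')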
"""-------------------------------------------------------------------------"""
# Checks whether the user already has a character sheet created.
def confere_usuário(id_usuário, id_grupo):
    # Connect to the database in the Atrix file.
connection = psycopg2.connect(database)
    # Create a database cursor, an iterator that allows navigating
    # and manipulating the db records, and assign it to a variable.
cursor = connection.cursor()
cursor.execute(''' SELECT Id_Grupo, Id_Jogador
FROM FICHAS
WHERE Id_Grupo = %s AND Id_Jogador = %s;''',
[id_grupo, id_usuário])
result = cursor.fetchall()
if len(result) > 0:
connection.close()
return True
else:
connection.close()
return False
"""-------------------------------------------------------------------------"""
# Checks whether a group already has an entry in the database
def confere_grupo(id_grupo):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute(''' SELECT Id_Grupo
FROM GRUPOS
WHERE Id_Grupo = %s;''',
[id_grupo])
result = cursor.fetchall()
if len(result) > 0:
connection.close()
return True
else:
connection.close()
return False
"""-------------------------------------------------------------------------"""
# Creates an entry for a group in the database.
def cria_grupo(id_grupo, id_mestre, edição_livre=True):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('INSERT INTO GRUPOS VALUES(%s, %s, %s);', [id_grupo, id_mestre, edição_livre])
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Creates an entry for a character in the database.
def cria_ficha(id_grupo, id_jogador, nome='', identidade_civil='',
identidade_secreta = True, sexo = '', idade = 0, altura = 0.0,
peso = 0.0, tamanho = 0, olhos = '', cabelo = '', pele = '',
base = '', nivel = 0, ataques = 0, defesa = 0):
pontos = 15*nivel
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('''INSERT INTO FICHAS VALUES(%s, %s, %s, %s,
%s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s);''',
[id_grupo, id_jogador, nome, identidade_civil,
identidade_secreta, sexo, idade, altura, peso, tamanho,
olhos, pele, cabelo, base, nivel, pontos, ataques, defesa])
    # Insert the character's basic information values
cursor.execute('''INSERT INTO HABILIDADES VALUES(%s, %s,
%s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s);''',
[id_grupo, id_jogador,
10, 10, 10, 10, 10, 10,
0, 0, 0, 0, 0, 0])
    # Insert the character's ability values
cursor.execute('''INSERT INTO SALVAMENTOS VALUES(%s, %s,
%s, %s, %s,
%s, %s, %s, %s);''',
[id_grupo, id_jogador,
0, 0, 0,
0, 0, 0, 0])
    # Insert the character's saving throw values
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Adds a feat to a character sheet
def add_feito(id_grupo, id_jogador, nome, bonus=''):
connection = psycopg2.connect(database)
cursor = connection.cursor()
    cursor.execute('''INSERT INTO FEITOS VALUES(%s, %s,
                                                %s, %s);''',
[id_grupo, id_jogador,
nome, bonus])
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Adds a skill to a character sheet
def add_perícia(id_grupo, id_jogador, nome, habilidade, grad, bonus):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('''INSERT INTO PERICIAS VALUES(%s, %s,
%s, %s, %s, %s);''',
[id_grupo, id_jogador,
nome, habilidade, grad, bonus])
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Adds a drawback to a character sheet
def add_desvantagem(id_grupo, id_jogador, desc, freq, intensidade):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('''INSERT INTO DESVANTAGENS VALUES(%s, %s,
%s, %s, %s);''',
[id_grupo, id_jogador,
desc, freq, intensidade])
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Adds a power to the character's sheet
def add_poder(id_grupo, id_jogador, nome, descrição, ativa, área_efeito,
tempo_ativação, tempo_recarga, duração, custo_base, grad,
feitos, extras, falhas):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('''INSERT INTO PODERES_E_DISPOSITIVOS VALUES(%s, %s,
%s, %s, %s, %s,
%s, %s, %s, %s,
%s, %s, %s, %s, %s);''',
[id_grupo, id_jogador,
nome, descrição, "PODER", ativa,
área_efeito, tempo_ativação, tempo_recarga, duração,
custo_base, grad, feitos, extras, falhas])
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Adds a device to the character's sheet
def add_dispositivo(id_grupo, id_jogador, nome, descrição, ativa, área_efeito,
tempo_ativação, tempo_recarga, duração, custo_base, grad,
feitos, extras, falhas):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('''INSERT INTO PODERES_E_DISPOSITIVOS VALUES(%s, %s,
%s, %s, %s, %s,
%s, %s, %s, %s,
%s, %s, %s, %s, %s);''',
[id_grupo, id_jogador,
nome, descrição, "DISPOSITIVO", ativa,
área_efeito, tempo_ativação, tempo_recarga, duração,
custo_base, grad, feitos, extras, falhas])
connection.commit()
connection.close()
"""-------------------------------------------------------------------------"""
# Returns the basic information of a character sheet.
def get_informação_básica(id_grupo, id_jogador):
connection = psycopg2.connect(database)
cursor = connection.cursor()
cursor.execute('''SELECT Nome, Nivel_de_Poder, Identidade_Civil, Sexo, Idade
FROM FICHAS
WHERE Id_grupo = %s AND Id_jogador = %s;''',
                   [id_grupo, id_jogador])
    result = {}
    result["nome"], result["nivel"], result["ident"], result["sexo"], result["idade"] = cursor.fetchone()
    for index, value in result.items():
if value == "":
result[index] = "[NÃO INFORMADO]"
if index == "sexo":
if result[index] == 'M':
result[index] = '♂'
else:
result[index] = '♀'
connection.close()
return result
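# Example of the dict returned above (values are illustrative): empty fields come back as
# "[NÃO INFORMADO]" and 'sexo' is mapped to the ♂/♀ symbol, e.g.
#   {"nome": "Exemplo", "nivel": 10, "ident": "[NÃO INFORMADO]", "sexo": "♂", "idade": 29}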
"""-------------------------------------------------------------------------"""
# Deletes an entry from the FICHAS table
def del_ficha(id_grupo, id_jogador):
connection = psycopg2.connect(database)
cursor = connection.cursor()
    cursor.execute('''DELETE FROM FICHAS
                   WHERE Id_grupo = %s AND Id_Jogador = %s;''',
                   [id_grupo, id_jogador])
    connection.commit()
    connection.close()
| 41.59009 | 105 | 0.490307 | ["MIT"] | felipedeoliveirarios/Atrix | database.py | 9,300 | Python
"""
neuropredict : easy and comprehensive predictive analysis.
"""
from __future__ import print_function
__all__ = ['run', 'cli', 'get_parser']
import argparse
import os
import sys
import textwrap
import traceback
import warnings
import matplotlib
import matplotlib.pyplot as plt
from sys import version_info
from os.path import join as pjoin, exists as pexists, abspath, realpath, basename
import numpy as np
from pyradigm import MLDataset
if version_info.major > 2:
# the order of import is very important to avoid circular imports
from neuropredict import __version__
from neuropredict import config_neuropredict as cfg
from neuropredict import rhst, visualize
from neuropredict.freesurfer import aseg_stats_subcortical, aseg_stats_whole_brain
from neuropredict.io import get_metadata, get_features, get_metadata_in_pyradigm, \
get_data_matrix, get_dir_of_dirs, get_pyradigm, get_arff, saved_dataset_matches
from neuropredict.utils import check_paths, uniq_combined_name, check_num_procs, sub_group_identifier, \
save_options, load_options, validate_feature_selection_size, make_dataset_filename, not_unspecified
else:
raise NotImplementedError('neuropredict requires Python 3+.')
def get_parser():
"Parser to specify arguments and their defaults."
parser = argparse.ArgumentParser(prog="neuropredict", formatter_class=argparse.RawTextHelpFormatter,
description='Easy, standardized and comprehensive predictive analysis.')
help_text_fs_dir = textwrap.dedent("""
Absolute path to ``SUBJECTS_DIR`` containing the finished runs of Freesurfer parcellation
Each subject will be queried after its ID in the metadata file.
E.g. ``--fs_subject_dir /project/freesurfer_v5.3``
\n \n """)
help_text_user_defined_folder = textwrap.dedent("""
List of absolute paths to user's own features.
Format: Each of these folders contains a separate folder for each subject (named after its ID in the metadata file)
containing a file called features.txt with one number per line.
    All the subjects (in a given folder) must have the same number of features (#lines in file).
    Different parent folders (each describing one feature set) can have a different number of features per subject,
but they must all have the same number of subjects (folders) within them.
Names of each folder is used to annotate the results in visualizations.
Hence name them uniquely and meaningfully, keeping in mind these figures will be included in your papers.
For example,
.. parsed-literal::
--user_feature_paths /project/fmri/ /project/dti/ /project/t1_volumes/
Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.
\n \n """)
help_text_pyradigm_paths = textwrap.dedent("""
Path(s) to pyradigm datasets.
Each path is self-contained dataset identifying each sample, its class and features.
\n \n """)
help_text_data_matrix = textwrap.dedent("""
List of absolute paths to text files containing one matrix of size N x p (num_samples x num_features).
Each row in the data matrix file must represent data corresponding to sample in the same row
of the meta data file (meta data file and data matrix must be in row-wise correspondence).
Name of this file will be used to annotate the results and visualizations.
E.g. ``--data_matrix_paths /project/fmri.csv /project/dti.csv /project/t1_volumes.csv ``
Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.
File format could be
- a simple comma-separated text file (with extension .csv or .txt): which can easily be read back with
numpy.loadtxt(filepath, delimiter=',')
or
- a numpy array saved to disk (with extension .npy or .numpy) that can read in with numpy.load(filepath).
    One could use ``numpy.savetxt(filepath, data_array, delimiter=',')`` or ``numpy.save(filepath, data_array)`` to save features.
File format is inferred from its extension.
\n \n """)
help_text_arff_paths = textwrap.dedent("""
List of paths to files saved in Weka's ARFF dataset format.
Note:
- this format does NOT allow IDs for each subject.
- given feature values are saved in text format, this can lead to large files with high-dimensional data,
compared to numpy arrays saved to disk in binary format.
More info: https://www.cs.waikato.ac.nz/ml/weka/arff.html
\n \n """)
help_text_positive_class = textwrap.dedent("""
Name of the positive class (e.g. Alzheimers, MCI etc) to be used in calculation of area under the ROC curve.
Applicable only for binary classification experiments.
Default: class appearing last in order specified in metadata file.
\n \n """)
help_text_train_perc = textwrap.dedent("""
Percentage of the smallest class to be reserved for training.
    Must be in the interval [0.01, 0.99].
If sample size is sufficiently big, we recommend 0.5.
If sample size is small, or class imbalance is high, choose 0.8.
\n \n """)
help_text_num_rep_cv = textwrap.dedent("""
Number of repetitions of the repeated-holdout cross-validation.
    The larger the number, the more stable the estimates will be.
\n \n """)
help_text_sub_groups = textwrap.dedent("""
This option allows the user to study different combinations of classes in a multi-class (N>2) dataset.
For example, in a dataset with 3 classes CN, FTD and AD,
two studies of pair-wise combinations can be studied separately
with the following flag ``--sub_groups CN,FTD CN,AD``.
This allows the user to focus on few interesting subgroups depending on their dataset/goal.
Format: Different subgroups must be separated by space,
and each sub-group must be a comma-separated list of class names defined in the meta data file.
Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters,
and ideally just alphanumeric characters separated by underscores.
    Any number of subgroups can be specified, but each subgroup must have at least two distinct classes.
Default: ``'all'``, leading to inclusion of all available classes in a all-vs-all multi-class setting.
\n \n """)
help_text_metadata_file = textwrap.dedent("""
Abs path to file containing metadata for subjects to be included for analysis.
At the minimum, each subject should have an id per row followed by the class it belongs to.
E.g.
.. parsed-literal::
sub001,control
sub002,control
sub003,disease
sub004,disease
\n \n """)
help_text_feature_selection = textwrap.dedent("""Number of features to select as part of feature selection.
Options:
- 'tenth'
- 'sqrt'
- 'log2'
- 'all'
Default: \'tenth\' of the number of samples in the training set.
    For example, if your dataset has 90 samples and you chose 50 percent for training (default),
    then you will have 90*.5=45 samples in the training set, leading to 5 features being selected for training.
    If you choose a fixed integer, ensure all the feature sets under evaluation have at least that many features.
\n \n """)
help_text_gs_level = textwrap.dedent("""
Flag to specify the level of grid search during hyper-parameter optimization on the training set.
    Allowed options are: 'none', 'light' and 'exhaustive', in the order of how many parameters/values will be optimized.
    More parameters and more values demand more resources and much longer time for optimization.
    The 'light' option relies on "folk wisdom" to try the least number of values (no more than one or two)
    for the parameters of the given classifier (e.g. a large number, say 500 trees, for a random forest optimization).
    The 'light' option will be the fastest and should give a "rough idea" of predictive performance.
    The 'exhaustive' option will try the most parameter values for the most parameters that can be optimized.
""")
help_text_make_vis = textwrap.dedent("""
Option to make visualizations from existing results in the given path.
    This is helpful when neuropredict fails to generate result figures automatically
    e.g. on an HPC cluster, or in another environment where DISPLAY is not available.
""")
help_text_atlas = textwrap.dedent("""
Name of the atlas to use for visualization. Default: fsaverage, if available.
\n \n """)
help_text_num_cpus = textwrap.dedent("""
Number of CPUs to use to parallelize CV repetitions.
Default : 4.
Number of CPUs will be capped at the number available on the machine if higher is requested.
\n \n """)
help_text_out_dir = textwrap.dedent("""
Output folder to store gathered features & results.
\n \n """)
help_classifier = textwrap.dedent("""
String specifying one of the implemented classifiers.
(Classifiers are carefully chosen to allow for the comprehensive report provided by neuropredict).
Default: 'RandomForestClassifier'
""")
help_feat_select_method = textwrap.dedent("""
Feature selection method to apply prior to training the classifier.
Default: 'VarianceThreshold', removing features with 0.001 percent of lowest variance (zeros etc).
""")
parser.add_argument("-m", "--meta_file", action="store", dest="meta_file",
default=None, required=False, help=help_text_metadata_file)
parser.add_argument("-o", "--out_dir", action="store", dest="out_dir", required=False, help=help_text_out_dir,
default=None )
parser.add_argument("-f", "--fs_subject_dir", action="store", dest="fs_subject_dir",
default=None, help=help_text_fs_dir)
user_defined = parser.add_argument_group(title='Input data and formats',
description='Only one of the following types can be specified.')
user_defined.add_argument("-y", "--pyradigm_paths", action="store", dest="pyradigm_paths",
nargs='+', # to allow for multiple features
default=None,
help=help_text_pyradigm_paths)
user_defined.add_argument("-u", "--user_feature_paths", action="store", dest="user_feature_paths",
nargs='+', # to allow for multiple features
default=None,
help=help_text_user_defined_folder)
user_defined.add_argument("-d", "--data_matrix_paths", action="store", dest="data_matrix_paths",
nargs='+',
default=None,
help=help_text_data_matrix)
user_defined.add_argument("-a", "--arff_paths", action="store", dest="arff_paths",
nargs='+',
default=None,
help=help_text_arff_paths)
cv_args_group = parser.add_argument_group(title='Cross-validation',
description='Parameters related to training and optimization during cross-validation')
cv_args_group.add_argument("-p", "--positive_class", action="store", dest="positive_class",
default=None,
help=help_text_positive_class)
cv_args_group.add_argument("-t", "--train_perc", action="store", dest="train_perc",
default=cfg.default_train_perc,
help=help_text_train_perc)
cv_args_group.add_argument("-n", "--num_rep_cv", action="store", dest="num_rep_cv",
default=cfg.default_num_repetitions,
help=help_text_num_rep_cv)
cv_args_group.add_argument("-k", "--num_features_to_select", dest="num_features_to_select",
action="store", default=cfg.default_num_features_to_select,
help=help_text_feature_selection)
cv_args_group.add_argument("-sg", "--sub_groups", action="store", dest="sub_groups",
nargs="*",
default="all",
help=help_text_sub_groups)
cv_args_group.add_argument("-g", "--gs_level", action="store", dest="gs_level",
default="light", help=help_text_gs_level, choices=cfg.GRIDSEARCH_LEVELS)
pipeline_group = parser.add_argument_group(title='Predictive Model',
description='Parameters related to pipeline comprising the predictive model')
pipeline_group.add_argument("-fs", "--feat_select_method", action="store", dest="feat_select_method",
default=cfg.default_feat_select_method, help=help_feat_select_method,
choices=cfg.feature_selection_choices)
pipeline_group.add_argument("-e", "--classifier", action="store", dest="classifier",
default=cfg.default_classifier, help=help_classifier,
choices=cfg.classifier_choices)
vis_args = parser.add_argument_group(title='Visualization',
description='Parameters related to generating visualizations')
vis_args.add_argument("-z", "--make_vis", action="store", dest="make_vis",
default=None, help=help_text_make_vis)
comp_args = parser.add_argument_group(title='Computing',
description='Parameters related to computations/debugging')
comp_args.add_argument("-c", "--num_procs", action="store", dest="num_procs",
default=cfg.DEFAULT_NUM_PROCS, help=help_text_num_cpus)
comp_args.add_argument('-v', '--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
return parser
def organize_inputs(user_args):
"""
Validates the input features specified and returns organized list of paths and readers.
Parameters
----------
user_args : ArgParse object
Various options specified by the user.
Returns
-------
user_feature_paths : list
List of paths to specified input features
user_feature_type : str
String identifying the type of user-defined input
fs_subject_dir : str
Path to freesurfer subject directory, if supplied.
"""
atleast_one_feature_specified = False
# specifying pyradigm avoids the need for separate meta data file
meta_data_supplied = False
meta_data_format = None
if not_unspecified(user_args.fs_subject_dir):
fs_subject_dir = abspath(user_args.fs_subject_dir)
if not pexists(fs_subject_dir):
raise IOError("Given Freesurfer directory doesn't exist.")
atleast_one_feature_specified = True
else:
fs_subject_dir = None
# ensuring only one type is specified
mutually_excl_formats = ['user_feature_paths', 'data_matrix_paths', 'pyradigm_paths', 'arff_paths']
not_none_count = 0
for format in mutually_excl_formats:
if not_unspecified(getattr(user_args, format)):
not_none_count = not_none_count + 1
if not_none_count > 1:
raise ValueError('Only one of the following formats can be specified:\n{}'.format(mutually_excl_formats))
if not_unspecified(user_args.user_feature_paths):
user_feature_paths = check_paths(user_args.user_feature_paths, path_type='user defined (dir_of_dirs)')
atleast_one_feature_specified = True
user_feature_type = 'dir_of_dirs'
elif not_unspecified(user_args.data_matrix_paths):
user_feature_paths = check_paths(user_args.data_matrix_paths, path_type='data matrix')
atleast_one_feature_specified = True
user_feature_type = 'data_matrix'
elif not_unspecified(user_args.pyradigm_paths):
user_feature_paths = check_paths(user_args.pyradigm_paths, path_type='pyradigm')
atleast_one_feature_specified = True
meta_data_supplied = user_feature_paths[0]
meta_data_format = 'pyradigm'
user_feature_type = 'pyradigm'
elif not_unspecified(user_args.arff_paths):
user_feature_paths = check_paths(user_args.arff_paths, path_type='ARFF')
atleast_one_feature_specified = True
user_feature_type = 'arff'
meta_data_supplied = user_feature_paths[0]
meta_data_format = 'arff'
else:
user_feature_paths = None
user_feature_type = None
    # map in python 3 returns a generator, not a list, so len() wouldn't work
    if user_feature_paths is not None and not isinstance(user_feature_paths, list):
        user_feature_paths = list(user_feature_paths)
if not atleast_one_feature_specified:
        raise ValueError('At least one method of specifying features must be specified. '
'It can be a path(s) to pyradigm dataset, matrix file, user-defined folder or a Freesurfer subject directory.')
return user_feature_paths, user_feature_type, fs_subject_dir, meta_data_supplied, meta_data_format
def parse_args():
"""Parser/validator for the cmd line args."""
parser = get_parser()
if len(sys.argv) < 2:
print('Too few arguments!')
parser.print_help()
parser.exit(1)
# parsing
try:
user_args = parser.parse_args()
except:
parser.exit(1)
if len(sys.argv) == 3 and not_unspecified(user_args.make_vis):
out_dir = realpath(user_args.make_vis)
res_path = pjoin(out_dir,cfg.file_name_results)
if pexists(out_dir) and pexists(res_path):
print('\n\nSaving the visualizations to \n{}'.format(out_dir))
make_visualizations(res_path, out_dir)
sys.exit(0)
else:
raise ValueError('Given folder does not exist, or has no results!')
user_feature_paths, user_feature_type, fs_subject_dir, meta_data_path, meta_data_format = organize_inputs(user_args)
if not meta_data_path:
if user_args.meta_file is not None:
meta_file = abspath(user_args.meta_file)
if not pexists(meta_file):
raise IOError("Meta data file doesn't exist.")
else:
raise ValueError('Metadata file must be provided when not using pyradigm/ARFF inputs.')
sample_ids, classes = get_metadata(meta_file)
else:
print('Using meta data from:\n{}'.format(meta_data_path))
sample_ids, classes = get_metadata_in_pyradigm(meta_data_path, meta_data_format)
if user_args.out_dir is not None:
out_dir = realpath(user_args.out_dir)
else:
out_dir = pjoin(realpath(os.getcwd()), cfg.output_dir_default)
try:
os.makedirs(out_dir, exist_ok=True)
except:
raise IOError('Output folder could not be created.')
train_perc = np.float32(user_args.train_perc)
if not ( 0.01 <= train_perc <= 0.99):
raise ValueError("Training percentage {} out of bounds - must be >= 0.01 and <= 0.99".format(train_perc))
num_rep_cv = np.int64(user_args.num_rep_cv)
if num_rep_cv < 10:
raise ValueError("Atleast 10 repetitions of CV is recommened.")
num_procs = check_num_procs(user_args.num_procs)
class_set, subgroups, positive_class = validate_class_set(classes, user_args.sub_groups, user_args.positive_class)
feature_selection_size = validate_feature_selection_size(user_args.num_features_to_select)
grid_search_level = user_args.gs_level.lower()
if grid_search_level not in cfg.GRIDSEARCH_LEVELS:
raise ValueError('Unrecognized level of grid search. Valid choices: {}'.format(cfg.GRIDSEARCH_LEVELS))
classifier = user_args.classifier.lower()
feat_select_method = user_args.feat_select_method.lower()
# saving the validated and expanded values to disk for later use.
options_to_save = [sample_ids, classes, out_dir, user_feature_paths, user_feature_type, fs_subject_dir,
train_perc, num_rep_cv, positive_class, subgroups, feature_selection_size, num_procs,
grid_search_level, classifier, feat_select_method]
options_path = save_options(options_to_save, out_dir)
return sample_ids, classes, out_dir, options_path, \
user_feature_paths, user_feature_type, fs_subject_dir, \
train_perc, num_rep_cv, \
positive_class, subgroups, \
feature_selection_size, num_procs, \
grid_search_level, classifier, feat_select_method
def make_visualizations(results_file_path, out_dir, options_path=None):
"""
Produces the performance visualizations/comparisons from the cross-validation results.
Parameters
----------
results_file_path : str
Path to file containing results produced by `rhst`
out_dir : str
Path to a folder to store results.
"""
results_dict = rhst.load_results_dict(results_file_path)
# using shorter names for readability
accuracy_balanced = results_dict['accuracy_balanced']
method_names = results_dict['method_names']
num_classes = results_dict['num_classes']
class_sizes = results_dict['class_sizes']
confusion_matrix = results_dict['confusion_matrix']
class_order = results_dict['class_set']
feature_importances_rf = results_dict['feature_importances_rf']
feature_names = results_dict['feature_names']
num_times_misclfd = results_dict['num_times_misclfd']
num_times_tested = results_dict['num_times_tested']
feature_importances_available = True
if options_path is not None:
user_options = load_options(out_dir, options_path)
if user_options['classifier_name'].lower() not in cfg.clfs_with_feature_importance:
feature_importances_available = False
else:
            # check if all the values are NaN
unusable = [ np.all(np.isnan(method_fi.flatten())) for method_fi in feature_importances_rf ]
feature_importances_available = not np.all(unusable)
try:
balacc_fig_path = pjoin(out_dir, 'balanced_accuracy')
visualize.metric_distribution(accuracy_balanced, method_names, balacc_fig_path,
class_sizes, num_classes, "Balanced Accuracy")
confmat_fig_path = pjoin(out_dir, 'confusion_matrix')
visualize.confusion_matrices(confusion_matrix, class_order, method_names, confmat_fig_path)
cmp_misclf_fig_path = pjoin(out_dir, 'compare_misclf_rates')
if num_classes > 2:
visualize.compare_misclf_pairwise(confusion_matrix, class_order, method_names, cmp_misclf_fig_path)
elif num_classes == 2:
visualize.compare_misclf_pairwise_parallel_coord_plot(confusion_matrix, class_order, method_names,
cmp_misclf_fig_path)
if feature_importances_available:
featimp_fig_path = pjoin(out_dir, 'feature_importance')
visualize.feature_importance_map(feature_importances_rf, method_names, featimp_fig_path, feature_names)
else:
print('\nCurrent predictive model does not provide feature importance values. Skipping them.')
misclf_out_path = pjoin(out_dir, 'misclassified_subjects')
visualize.freq_hist_misclassifications(num_times_misclfd, num_times_tested, method_names, misclf_out_path)
except:
traceback.print_exc()
warnings.warn('Error generating the visualizations! Skipping ..')
# cleaning up
plt.close('all')
return
def validate_class_set(classes, subgroups, positive_class=None):
"Ensures class names are valid and sub-groups exist."
class_set = list(set(classes.values()))
sub_group_list = list()
if subgroups != 'all':
if isinstance(subgroups, str):
subgroups = [ subgroups, ]
for comb in subgroups:
cls_list = comb.split(',')
            # ensuring each subgroup has at least two classes
if len(set(cls_list)) < 2:
raise ValueError('This subgroup {} does not contain two unique classes.'.format(comb))
# verify each of them were defined in meta
for cls in cls_list:
if cls not in class_set:
raise ValueError("Class {} in combination {} "
"does not exist in meta data.".format(cls, comb))
sub_group_list.append(cls_list)
else:
# using all classes
sub_group_list.append(class_set)
# the following loop is required to preserve original order
# this does not: class_order_in_meta = list(set(classes.values()))
class_order_in_meta = list()
for x in class_set:
if x not in class_order_in_meta:
class_order_in_meta.append(x)
num_classes = len(class_order_in_meta)
if num_classes < 2:
raise ValueError("Atleast two classes are required for predictive analysis! "
"Only one given ({})".format(set(classes.values())))
if num_classes == 2:
if not_unspecified(positive_class):
if positive_class not in class_order_in_meta:
raise ValueError('Positive class specified does not exist in meta data.\n'
'Choose one of {}'.format(class_order_in_meta))
print('Positive class specified for AUC calculation: {}'.format(positive_class))
else:
positive_class = class_order_in_meta[-1]
print('Positive class inferred for AUC calculation: {}'.format(positive_class))
return class_set, sub_group_list, positive_class
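# For example (illustrative), with classes {'s1': 'CN', 's2': 'FTD', 's3': 'AD'} and
# subgroups ['CN,AD'], this returns roughly:
#   class_set      -> ['CN', 'FTD', 'AD']   (order not guaranteed, since it comes from set())
#   sub_group_list -> [['CN', 'AD']]
#   positive_class -> returned unchanged here; it is only inferred for two-class problems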
def import_datasets(method_list, out_dir, subjects, classes, feature_path, feature_type='dir_of_dirs'):
"""
Imports all the specified feature sets and organizes them into datasets.
Parameters
----------
method_list : list of callables
Set of predefined methods returning a vector of features for a given sample id and location
out_dir : str
Path to the output folder
subjects : list of str
List of sample ids
classes : dict
Dict identifying the class for each sample id in the dataset.
feature_path : list of str
List of paths to the root directory containing the features (pre- or user-defined).
Must be of same length as method_list
feature_type : str
a string identifying the structure of feature set.
Choices = ('dir_of_dirs', 'data_matrix')
Returns
-------
method_names : list of str
List of method names used for annotation.
dataset_paths_file : str
Path to the file containing paths to imported feature sets.
"""
def clean_str(string): return ' '.join(string.strip().split(' _-:\n\r\t'))
method_names = list()
outpath_list = list()
for mm, cur_method in enumerate(method_list):
if cur_method in [get_dir_of_dirs]:
method_name = basename(feature_path[mm])
elif cur_method in [get_data_matrix]:
method_name = os.path.splitext(basename(feature_path[mm]))[0]
elif cur_method in [get_pyradigm]:
if feature_type in ['pyradigm']:
loaded_dataset = MLDataset(filepath=feature_path[mm])
else:
raise ValueError('Invalid state of the program!')
if len(loaded_dataset.description) > 1:
method_name = loaded_dataset.description
else:
method_name = basename(feature_path[mm])
method_names.append(clean_str(method_name))
if saved_dataset_matches(loaded_dataset, subjects, classes):
outpath_list.append(feature_path[mm])
continue
else:
raise ValueError('supplied pyradigm dataset does not match samples in the meta data.')
elif cur_method in [get_arff]:
loaded_dataset = MLDataset(arff_path=feature_path[mm])
if len(loaded_dataset.description) > 1:
method_name = loaded_dataset.description
else:
method_name = basename(feature_path[mm])
method_names.append(clean_str(method_name))
out_name = make_dataset_filename(method_name)
outpath_dataset = pjoin(out_dir, out_name)
loaded_dataset.save(outpath_dataset)
outpath_list.append(outpath_dataset)
continue
else:
# adding an index for an even more unique identification
# method_name = '{}_{}'.format(cur_method.__name__,mm)
method_name = cur_method.__name__
method_names.append(clean_str(method_name))
out_name = make_dataset_filename(method_name)
outpath_dataset = pjoin(out_dir, out_name)
if not saved_dataset_matches(outpath_dataset, subjects, classes):
# noinspection PyTypeChecker
outpath_dataset = get_features(subjects, classes,
feature_path[mm],
out_dir, out_name,
cur_method, feature_type)
outpath_list.append(outpath_dataset)
combined_name = uniq_combined_name(method_names)
dataset_paths_file = pjoin(out_dir, 'datasetlist.' + combined_name + '.txt')
with open(dataset_paths_file, 'w') as dpf:
dpf.writelines('\n'.join(outpath_list))
return method_names, dataset_paths_file
def make_method_list(fs_subject_dir, user_feature_paths, user_feature_type='dir_of_dirs'):
"""
Returns an organized list of feature paths and methods to read in features.
Parameters
----------
fs_subject_dir : str
user_feature_paths : list of str
user_feature_type : str
Returns
-------
feature_dir : list
method_list : list
"""
freesurfer_readers = [aseg_stats_subcortical, aseg_stats_whole_brain]
userdefined_readers = {'dir_of_dirs': get_dir_of_dirs,
'data_matrix': get_data_matrix,
'pyradigm': get_pyradigm,
'arff': get_arff}
feature_dir = list()
method_list = list()
if not_unspecified(user_feature_paths):
if user_feature_type not in userdefined_readers:
raise NotImplementedError("Invalid feature type or its reader is not implemented yet!")
for upath in user_feature_paths:
feature_dir.append(upath)
method_list.append(userdefined_readers[user_feature_type])
if not_unspecified(fs_subject_dir):
for fsrdr in freesurfer_readers:
feature_dir.append(fs_subject_dir)
method_list.append(fsrdr)
if len(method_list) != len(feature_dir):
raise ValueError('Invalid specification for features!')
if len(method_list) < 1:
        raise ValueError('At least one feature set must be specified.')
print("\nRequested features for analysis:")
for mm, method in enumerate(method_list):
print("{} from {}".format(method.__name__, feature_dir[mm]))
return feature_dir, method_list
def prepare_and_run(subjects, classes, out_dir, options_path,
user_feature_paths, user_feature_type, fs_subject_dir,
train_perc, num_rep_cv, positive_class,
sub_group_list, feature_selection_size, num_procs,
grid_search_level, classifier, feat_select_method):
"Organizes the inputs and prepares them for CV"
feature_dir, method_list = make_method_list(fs_subject_dir, user_feature_paths, user_feature_type)
method_names, dataset_paths_file = import_datasets(method_list, out_dir, subjects, classes,
feature_dir, user_feature_type)
# iterating through the given set of subgroups
for sub_group in sub_group_list:
print('{}\nProcessing subgroup : {}\n{}'.format('-'*80, sub_group, '-'*80))
out_dir_sg = pjoin(out_dir, sub_group_identifier(sub_group))
results_file_path = rhst.run(dataset_paths_file, method_names, out_dir_sg,
train_perc=train_perc, num_repetitions=num_rep_cv,
positive_class=positive_class, sub_group=sub_group,
feat_sel_size=feature_selection_size, num_procs=num_procs,
grid_search_level=grid_search_level,
classifier_name=classifier, feat_select_method=feat_select_method,
options_path=options_path)
print('\n\nSaving the visualizations to \n{}'.format(out_dir))
make_visualizations(results_file_path, out_dir_sg, options_path)
print('\n')
return
def cli():
"""
Main entry point.
"""
subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, \
fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, \
feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method = parse_args()
print('Running neuropredict {}'.format(__version__))
prepare_and_run(subjects, classes, out_dir, options_path,
user_feature_paths, user_feature_type, fs_subject_dir,
train_perc, num_rep_cv, positive_class,
sub_group_list, feature_selection_size, num_procs,
grid_search_level, classifier, feat_select_method)
return
def run(feature_sets,
feature_type=cfg.default_feature_type,
meta_data=None,
output_dir=None,
pipeline=None,
train_perc=0.5,
num_repetitions=200,
positive_class=None,
feat_sel_size=cfg.default_num_features_to_select,
sub_groups='all',
grid_search_level=cfg.GRIDSEARCH_LEVEL_DEFAULT,
num_procs=2):
"""
    Generate a comprehensive report on the predictive performance of different feature sets and statistically compare them.
Main entry point for API access.
Parameters
----------
feature_sets : list
The input can be specified in either of the following ways:
- list of paths to pyradigm datasets saved on disk
- path to a file containing list of paths (each line containing path to a valid MLDataset)
- list of MLDatasets that are already loaded
- list of tuples (to specify multiple features), each element containing (X, y) i.e. data and target labels
- a single tuple containing (X, y) i.e. data and target labels
- list of paths to CSV files, each containing one type of features.
When specifying multiple sets of input features, ensure:
- all of them contain the same number of samples
- each sample belongs to same class across all feature sets.
feature_type : str
String identifying the type of features as described above. It could be:
'list_of_pyradigm_paths', 'pyradigm_list',
'list_of_tuples', 'tuple', 'list_of_csv_paths'
meta_data : multiple
The meta data can be specified in either of the following ways:
- a path to a meta data file (see :doc:`features` page)
- a dict keyed in by sample IDs with values representing their classes.
- None, if meta data is already specified in ``feature_sets`` input (e.g. with pyradigms).
pipeline : str or object
        If a string, it identifies one of the implemented classifiers e.g. 'RandomForestClassifier' or 'ExtraTreesClassifier'
        If an object, it must be a scikit-learn pipeline describing the sequence of steps.
This is typically a set of feature selections or dimensionality reduction steps followed by an estimator (classifier).
See http://scikit-learn.org/stable/modules/pipeline.html#pipeline for more details.
Default: None, which leads to the selection of a Random Forest classifier,
with robust scaling, followed by removal of low variance features.
method_names : list
A list of names to denote the different feature sets
    output_dir : str
Path to output directory to save the cross validation results to.
If not specified, a new directory named 'neuropredict' will be created in the current directory.
train_perc : float, optional
        Percentage of subjects to train the classifier on.
        The percentage is applied to the size of the smallest class to estimate
        the number of subjects from each class to be reserved for training.
        The smallest class is chosen to avoid class-imbalance in the training set.
        Default: 0.5 (50%).
positive_class : str
Name of the class to be treated as positive in calculation of AUC
feat_sel_size : str or int
Number of features to select as part of feature selection. Options:
- 'tenth'
- 'sqrt'
- 'log2'
- 'all'
        Default: \'tenth\' of the number of samples in the training set.
        For example, if your dataset has 90 samples and you chose 50 percent for training (default),
        then you will have 90*.5=45 samples in the training set, leading to 5 features being selected for training.
        If you choose a fixed integer, ensure all the feature sets under evaluation have at least that many features.
num_repetitions : int, optional
Number of repetitions of cross-validation estimation. Default: 200.
num_procs : int, optional
Number of CPUs to use to parallelize CV repetitions.
        Default: 2. Number of CPUs will be capped at the number available on the machine if a higher count is requested.
sub_groups : list
        This option allows the user to study different combinations of classes in a multi-class (N>2) dataset.
        For example, in a dataset with 3 classes CN, FTD and AD, two studies of pair-wise combinations can be
        studied separately with the following flag ``--sub_groups CN,FTD CN,AD``. This allows the user to focus
        on a few interesting subgroups depending on their dataset/goal.
        Format: Different subgroups must be separated by space, and each sub-group must be a comma-separated
        list of class names defined in the meta data file. Hence it is strongly recommended to use class names
        without any spaces, commas, hyphens and special characters, and ideally just alphanumeric characters
        separated by underscores. Any number of subgroups can be specified, but each subgroup must have
        at least two distinct classes.
Default: ``'all'``, leading to inclusion of all available classes in a all-vs-all multi-class setting.
grid_search_level : str
Flag to specify the level of grid search during hyper-parameter optimization on the training set.
        Allowed options are: 'none', 'light' and 'exhaustive', in the order of how many parameters/values will be optimized.
        More parameters and more values demand more resources and much longer time for optimization.
        The 'light' option relies on "folk wisdom" to try the least number of values (no more than one or two)
        for the parameters of the given classifier (e.g. a large number, say 500 trees, for a random forest optimization).
        The 'light' option will be the fastest and should give a "rough idea" of predictive performance.
        The 'exhaustive' option will try the most parameter values for the most parameters that can be optimized.
Returns
-------
results_path : str
Path to pickle file containing full set of CV results.
"""
raise NotImplementedError
return
if __name__ == '__main__':
cli()
| 43.069742 | 438 | 0.673227 | ["MIT"] | dinga92/neuropredict | neuropredict/run_workflow.py | 40,141 | Python
from typing import Any, Callable, List, Optional, Tuple
import gym
import numpy as np
from tianshou.env.worker import EnvWorker
try:
import ray
except ImportError:
pass
class _SetAttrWrapper(gym.Wrapper):
def set_env_attr(self, key: str, value: Any) -> None:
setattr(self.env, key, value)
def get_env_attr(self, key: str) -> Any:
return getattr(self.env, key)
class RayEnvWorker(EnvWorker):
"""Ray worker used in RayVectorEnv."""
def __init__(self, env_fn: Callable[[], gym.Env]) -> None:
self.env = ray.remote(_SetAttrWrapper).options(num_cpus=0).remote(env_fn())
super().__init__(env_fn)
def get_env_attr(self, key: str) -> Any:
return ray.get(self.env.get_env_attr.remote(key))
def set_env_attr(self, key: str, value: Any) -> None:
ray.get(self.env.set_env_attr.remote(key, value))
def reset(self) -> Any:
return ray.get(self.env.reset.remote())
@staticmethod
def wait( # type: ignore
workers: List["RayEnvWorker"], wait_num: int, timeout: Optional[float] = None
) -> List["RayEnvWorker"]:
results = [x.result for x in workers]
ready_results, _ = ray.wait(results, num_returns=wait_num, timeout=timeout)
return [workers[results.index(result)] for result in ready_results]
def send_action(self, action: np.ndarray) -> None:
        # self.result is actually a handle (a ray object reference for the asynchronous step call)
self.result = self.env.step.remote(action)
def get_result(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
return ray.get(self.result)
def seed(self, seed: Optional[int] = None) -> List[int]:
super().seed(seed)
return ray.get(self.env.seed.remote(seed))
def render(self, **kwargs: Any) -> Any:
return ray.get(self.env.render.remote(**kwargs))
def close_env(self) -> None:
ray.get(self.env.close.remote())
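# Minimal usage sketch (assumes `ray` is installed and `ray.init()` has been called;
# the gym environment id is only an example):
#   worker = RayEnvWorker(lambda: gym.make('CartPole-v1'))
#   obs = worker.reset()
#   action = worker.get_env_attr('action_space').sample()
#   worker.send_action(np.array(action))
#   obs, rew, done, info = worker.get_result()
#   worker.close_env()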
| 30.365079 | 85 | 0.652378 | ["MIT"] | BIT-UAV-JJJ/tianshou | tianshou/env/worker/ray.py | 1,913 | Python
"""
Base settings for jackergram project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (jackergram/config/settings/base.py - 3 = jackergram/)
APPS_DIR = ROOT_DIR.path('jackergram')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
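# Illustrative .env contents for local development (values are placeholders, not real settings):
#   DJANGO_DEBUG=True
#   DATABASE_URL=postgres://user:password@localhost:5432/jackergram
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend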
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'jackergram.users.apps.UsersConfig',
# Your stuff: custom apps go here
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'jackergram.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""jacker""", '[email protected]'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Uses django-environ to accept uri format
# See: https://django-environ.readthedocs.io/en/latest/#supported-types
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///jackergram'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Seoul'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
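# Production settings commonly wrap the two loaders above in
# django.template.loaders.cached.Loader; with 'debug': DEBUG the base
# configuration stays convenient for development.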
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
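# FileSystemFinder searches the directories listed in STATICFILES_DIRS;
# AppDirectoriesFinder searches the static/ subdirectory of every installed app.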
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
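# The first hasher is used for new passwords; the rest remain available so
# existing hashes can still be verified (and upgraded on login). Argon2
# requires the argon2-cffi package, and the bcrypt hashers require bcrypt.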
# PASSWORD VALIDATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
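# ModelBackend handles standard username/password authentication; the allauth
# backend adds login flows managed by django-allauth (including social accounts).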
# django-allauth configuration defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'jackergram.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'jackergram.users.adapters.SocialAccountAdapter'
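# 'mandatory' email verification means users must confirm their address before
# they can log in. ACCOUNT_ALLOW_REGISTRATION is not an allauth setting; it is
# presumably read by the custom AccountAdapter above to open or close signup.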
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
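# 'account_login' is the URL name registered by django-allauth, while
# 'users:redirect' resolves inside the project's users app.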
# SLUGIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of the root django.contrib.admin URL; reverse it with {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| 36.956522 | 101 | 0.627255 | [
"MIT"
] | jmk2729/jackergram | config/settings/base.py | 10,200 | Python |