the-stack_0_8125
# The simplest way to work with zlib requires holding all of the data to be
# compressed or decompressed in memory.
import zlib
import binascii

original_data = b'This is the original text.'
print('Original     :', len(original_data), original_data)

compressed = zlib.compress(original_data)
print('Compressed   :', len(compressed), binascii.hexlify(compressed))

decompressed = zlib.decompress(compressed)
print('Decompressed :', len(decompressed), decompressed)

# The example shows that the compressed version of a small amount of data can
# be larger than the uncompressed version. While the actual result depends on
# the input data, it is interesting to observe the compression overhead for
# small data sets.
"""
output:
Original     : 26 b'This is the original text.'
Compressed   : 32 b'789c0bc9c82c5600a2928c5485fca2ccf4ccbcc41c8592d48a123d007f2f097e'
Decompressed : 26 b'This is the original text.'
"""
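# The snippet above holds everything in memory; for data too large for that,
# zlib also supports incremental (de)compression through compressobj() /
# decompressobj(). A minimal sketch of that pattern follows -- the helper
# names and the sample payload are illustrative; only the zlib calls are from
# the standard library.
import zlib

def compress_stream(chunks):
    """Compress an iterable of byte chunks incrementally."""
    compressor = zlib.compressobj()
    for chunk in chunks:
        data = compressor.compress(chunk)
        if data:
            yield data
    # flush() emits whatever is still buffered inside the compressor.
    yield compressor.flush()

def decompress_stream(chunks):
    """Decompress an iterable of compressed byte chunks incrementally."""
    decompressor = zlib.decompressobj()
    for chunk in chunks:
        yield decompressor.decompress(chunk)
    yield decompressor.flush()

# Round-trip a payload split into small pieces.
payload = [b'This is the original text. '] * 4
compressed_parts = b''.join(compress_stream(payload))
restored = b''.join(decompress_stream([compressed_parts]))
assert restored == b''.join(payload)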
the-stack_0_8127
# !/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals # -------------------------------------------# # author: sean lee # # email: [email protected] # #--------------------------------------------# """MIT License Copyright (c) 2018 Sean Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" import sys if sys.version_info[0] == 2: reload(sys) sys.setdefaultencoding('utf8') range = xrange import cPickle as pickle else: import pickle import io from ..module import Module from ..utils import native_content class Radical(Module): __notsave__ = [] __onlysave__ = ['dictionary'] def __init__(self): self.dictionary = {} def train(self, fpath): for fname in self.filelist(fpath): with io.open(fname, 'r', encoding="utf-8") as f: for line in f: line = line.strip() arr = line.split(',') if len(arr) != 2: continue self.dictionary[arr[0]] = arr[1] def radical(self, char): if char in self.dictionary: return self.dictionary[char] return None
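# -*- coding: utf-8 -*-
# Standalone sketch of how the Radical dictionary above is built and queried.
# It mirrors train()/radical() without the package's Module base class so it
# can run on its own; the sample file name and the two example entries are
# made up, but the "character,radical" line format is the one train() parses.
import io

def build_radical_dict(fname):
    dictionary = {}
    with io.open(fname, 'r', encoding='utf-8') as f:
        for line in f:
            arr = line.strip().split(',')
            if len(arr) != 2:  # skip malformed lines, as train() does
                continue
            dictionary[arr[0]] = arr[1]
    return dictionary

if __name__ == '__main__':
    with io.open('radical_sample.txt', 'w', encoding='utf-8') as f:
        f.write(u'江,氵\n吃,口\n')
    d = build_radical_dict('radical_sample.txt')
    print(d.get(u'江'))  # -> 氵
    print(d.get(u'A'))   # -> None, matching Radical.radical() for unknown chars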
the-stack_0_8128
#!/usr/bin/env python

import matplotlib.pyplot as plt
import theanets

from utils import load_mnist, plot_layers, plot_images

e = theanets.Experiment(
    theanets.Classifier,
    layers=(784, 1024, 256, 64, 10),
    train_batches=100,
)

# first, run an unsupervised layerwise pretrainer.
train, valid, _ = load_mnist()
e.train(train, valid, optimize='pretrain', patience=1, min_improvement=0.1)

# second, run a supervised trainer on the classifier model.
train, valid, _ = load_mnist(labels=True)
e.train(train, valid)

plot_layers([e.network.find(i, 0) for i in (1, 2, 3)], tied_weights=True)
plt.tight_layout()
plt.show()
the-stack_0_8129
import logging from astropy.table import Table from astropy.coordinates import SkyCoord from astropy import units as u from astropy.io import fits import numpy as np import math import matplotlib.pyplot as plt from LCOWCSLookupProvider import getWCSForcamera, transformList from gaiaastrometryservicetools import astrometryServiceRefineWCSFromCatalog from SourceCatalogProvider import e91SourceCatalogProvider, SEPSourceCatalogProvider __author__ = '[email protected]' log = logging.getLogger(__name__) class CatalogMatcher: ''' Class to match two input catalogs: sourcecatalog is a catalog of sources extracted from an image, in coordinates of pixels (x,y) referencecatalog is a catalog of on-sky objects based on existing surveys, in coordinates of (RA, Dec) WCS is a astropy world coordiante system. the source catalog shall be a astropy Table with the columns 'x', 'y' the reference catalog shall be a astropy Table with the columns 'RA', 'Dec' ''' @staticmethod def createMatchedCatalogForLCO(imagepath, referenceCatalogProvider, matchradius=5, minobjects=1e20, undistort=False): ''' Automatically load source catalog from an LCO e91 processed file, fetch a reference catalog, and return a matchedcatalog object.''' if ('e91.fits' in imagepath): sourceCatalogProvider = e91SourceCatalogProvider() else: sourceCatalogProvider = SEPSourceCatalogProvider() sourceCatalog, image_wcs = sourceCatalogProvider.get_source_catalog(imagepath) if (sourceCatalog is None) or (image_wcs is None): return None if len(sourceCatalog['x']) < minobjects: log.info("Not enough stars found in source catalog (%d). %d are required. Skipping this one." % ( len(sourceCatalog['x']), minobjects)) return None ra = image_wcs.wcs.crval[0] dec = image_wcs.wcs.crval[1] # TODO: get camera identifier, date obs, etc exptime = None filter = None camera = None dateobs = None azimuth = None altitude = None hdu = fits.open(imagepath) # TODO: We are opening and closing fits files quite a lot here, might be not most efficient. # Go searching for meta data, in multiple extension ssince we might have a .fz compressed file :-( for extension in [0, 1]: if 'EXPTIME' in hdu[extension].header: exptime = hdu[extension].header['EXPTIME'] if ('FILTER') in hdu[extension].header: filter = hdu[extension].header['FILTER'] if 'DATE-OBS' in hdu[extension].header: dateobs = hdu[extension].header['DATE-OBS'] if 'INSTRUME' in hdu[extension].header: camera = hdu[extension].header['INSTRUME'] if 'AZIMUTH' in hdu[extension].header: azimuth = hdu[extension].header['AZIMUTH'] if 'ALTITUDE' in hdu[extension].header: altitude = hdu[extension].header['ALTITUDE'] hdu.close() # remove the distortion from the input catalog if requested and refine the WCS. if undistort: sip = getWCSForcamera(camera, image_wcs.wcs.crpix[0], image_wcs.wcs.crpix[1]) if sip is not None: log.info("undistorting image") u, v = transformList(sourceCatalog['x'], sourceCatalog['y'], sip) sourceCatalog['x'] = u sourceCatalog['y'] = v dedistortedwcs = astrometryServiceRefineWCSFromCatalog(sourceCatalog, image_wcs) if dedistortedwcs is not None: image_wcs = dedistortedwcs else: log.warning("astrometry.net did not find a solution on the undistorted image. 
Using original wcs") # fetch a reference catalog: referenceCatalog = referenceCatalogProvider.get_reference_catalog(ra, dec, 0.25) matchedCatalog = CatalogMatcher() matchedCatalog.matchCatalogs(sourceCatalog, referenceCatalog, image_wcs, matchradius) matchedCatalog.exptime = exptime matchedCatalog.filter = filter matchedCatalog.dateobs = dateobs matchedCatalog.camera = camera matchedCatalog.altitude = altitude matchedCatalog.azimuth = azimuth matchedCatalog.azimuth = azimuth return matchedCatalog def matchCatalogs(self, source=None, reference=None, wcs=None, matchradius=5): ''' match input catalogs. If no new catalogs are given, the match will be done on the chached catalogs of the class. ''' self.matchedCatalog = None # Cache management if wcs is not None: self.wcs = wcs if source is not None: self.source = source if reference is not None: self.reference = reference # transform source catalog to RADEC try: sourcera, sourcedec = self.wcs.all_pix2world(self.source['x'], self.source['y'], 1) sourceSkyCoords = SkyCoord(ra=sourcera * u.degree, dec=sourcedec * u.degree) referenceSkyCoords = SkyCoord(ra=self.reference['RA'] * u.degree, dec=self.reference['Dec'] * u.degree) idx, d2d, d3d = referenceSkyCoords.match_to_catalog_sky(sourceSkyCoords) distance = referenceSkyCoords.separation(sourceSkyCoords[idx]).arcsecond matchcondition = (distance < matchradius) self.matchedCatalog = Table([self.source['x'][idx][matchcondition], self.source['y'][idx][matchcondition], self.reference['RA'][matchcondition], self.reference['Dec'][matchcondition], distance[matchcondition] ], names=['x', 'y', 'RA', 'Dec', 'distarcsec'] ) except: log.exception("Error while transforming and matching") nummatched = len(self.matchedCatalog) if self.matchedCatalog is not None else 0 log.info("MatchCatalogs found {: 10d} pairs at search radius {: 6.3f}".format(nummatched, matchradius)) return self.matchedCatalog def updateWCSandUpdateRMS(self, usewcs=None): ''' transform the pixel list with a new wcs and get the distance based merrit function of that sollution. Note that when this is called, there should be already a matched catalog avaiable. ''' if usewcs is not None: self.wcs = usewcs # log.debug ("WCS updated for MatchedCatalog") else: pass # log.info ("WCS not updated") sourcera, sourcedec = self.wcs.all_pix2world(self.matchedCatalog['x'], self.matchedCatalog['y'], 1) sourceSkyCoords = SkyCoord(ra=sourcera * u.degree, dec=sourcedec * u.degree) referenceSkyCoords = SkyCoord(ra=self.matchedCatalog['RA'] * u.degree, dec=self.matchedCatalog['Dec'] * u.degree) self.matchedCatalog['distarcsec'] = referenceSkyCoords.separation(sourceSkyCoords).arcsecond result = math.sqrt(np.sum(self.matchedCatalog['distarcsec'] ** 2) / len(self.matchedCatalog['distarcsec'])) # log.info ("WCS CRVAL % 12.9f % 12.9f , Source RA / Dec [0] %f %f Merrit %f" % (self.wcs.wcs.crval[0], self.wcs.wcs.crval[1], sourcera[0], sourcedec[0], result)) return result def diagnosticPlots(self, basename): ''' Generate some helpful diagnostics for the distortion. 
''' if not self.matchedCatalog: return sourcera, sourcedec = self.wcs.all_pix2world(self.matchedCatalog['x'], self.matchedCatalog['y'], 1) deccor = math.cos(self.wcs.wcs.crval[1] * math.pi / 180) plt.subplot(projection=self.wcs) plt.plot(sourcera, sourcedec, '.') plt.plot(self.matchedCatalog['RA'], self.matchedCatalog['Dec'], '.') plt.xlabel("RA") plt.ylabel("DEC") plt.title(basename) plt.savefig("%s_RADEC.png" % basename) plt.close() plt.clf() plt.subplot(4, 1, 1) plt.title(basename) plt.plot(self.matchedCatalog['x'] - self.wcs.wcs.crpix[0], (self.matchedCatalog['RA'] - sourcera) * 3600. / deccor, '.') plt.xlabel("X [pixels]") plt.ylabel("residual RA [\'\']") plt.ylim([-1.75, 1.75]) plt.subplot(4, 1, 2) plt.plot(self.matchedCatalog['x'] - self.wcs.wcs.crpix[0], (self.matchedCatalog['Dec'] - sourcedec) * 3600., '.') plt.xlabel("X [pixels]") plt.ylabel("resiudal Dec [\'\']") plt.ylim([-1.75, 1.75]) plt.subplot(4, 1, 3) plt.plot(self.matchedCatalog['y'] - self.wcs.wcs.crpix[1], (self.matchedCatalog['RA'] - sourcera) * 3600. / deccor, '.') plt.xlabel("Y [pixels]") plt.ylabel("residual ra [\'\']") plt.ylim([-1.75, 1.75]) plt.subplot(4, 1, 4) plt.plot(self.matchedCatalog['y'] - self.wcs.wcs.crpix[1], (self.matchedCatalog['Dec'] - sourcedec) * 3600., '.') plt.xlabel("Y [pixels]") plt.ylabel("residual dec [\'\']") plt.ylim([-1.75, 1.75]) plt.savefig("%s_residuals.png" % basename, dpi=200) plt.close() # plt.clf() # plt.plot(np.sqrt((self.matchedCatalog['y'] - self.wcs.wcs.crpix[1]) ** 2 + ( # self.matchedCatalog['x'] - self.wcs.wcs.crpix[0]) ** 2), # self.matchedCatalog['distarcsec'], '.') # plt.xlabel("radius [pixels]") # plt.ylabel("Distance [\'\']") # plt.savefig("%s_radialdist.png" % basename)
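# Hypothetical driver for the CatalogMatcher defined above. The image path and
# the reference-catalog provider are placeholders: any object exposing
# get_reference_catalog(ra, dec, radius) and returning an astropy Table with
# 'RA'/'Dec' columns (the interface used in createMatchedCatalogForLCO) will
# do. Only methods shown above are called.
def match_one_image(imagepath, reference_provider):
    matched = CatalogMatcher.createMatchedCatalogForLCO(
        imagepath, reference_provider, matchradius=5, minobjects=50,
        undistort=True)
    if matched is None:
        log.warning("No matched catalog produced for %s" % imagepath)
        return None
    # RMS distance (arcsec) between matched source and reference positions
    # under the current WCS.
    rms = matched.updateWCSandUpdateRMS()
    log.info("RMS residual for %s: %.3f arcsec" % (imagepath, rms))
    matched.diagnosticPlots(imagepath.replace('.fits', ''))
    return matched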
the-stack_0_8130
#!/usr/bin/env python3 # Copyright (c) 2017-2019 The BitPal Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listsinceblock RPC.""" from test_framework.test_framework import BitPalTestFramework from test_framework.messages import BIP125_SEQUENCE_NUMBER from test_framework.util import ( assert_array_result, assert_equal, assert_raises_rpc_error, connect_nodes, ) from decimal import Decimal class ListSinceBlockTest(BitPalTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have # only one connection. (See fPreferredDownload in net_processing) connect_nodes(self.nodes[1], 2) self.nodes[2].generate(101) self.sync_all() self.test_no_blockhash() self.test_invalid_blockhash() self.test_reorg() self.test_double_spend() self.test_double_send() self.double_spends_filtered() def test_no_blockhash(self): self.log.info("Test no blockhash") txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) blockhash, = self.nodes[2].generate(1) blockheight = self.nodes[2].getblockheader(blockhash)['height'] self.sync_all() txs = self.nodes[0].listtransactions() assert_array_result(txs, {"txid": txid}, { "category": "receive", "amount": 1, "blockhash": blockhash, "blockheight": blockheight, "confirmations": 1, }) assert_equal( self.nodes[0].listsinceblock(), {"lastblock": blockhash, "removed": [], "transactions": txs}) assert_equal( self.nodes[0].listsinceblock(""), {"lastblock": blockhash, "removed": [], "transactions": txs}) def test_invalid_blockhash(self): self.log.info("Test invalid blockhash") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "0000000000000000000000000000000000000000000000000000000000000000") assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock, "invalid-hex") assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock, "Z000000000000000000000000000000000000000000000000000000000000000") def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was no longer in the main chain: ab0 / \ aa1 [tx0] bb1 | | aa2 bb2 | | aa3 bb3 | bb4 Consider a client that has only seen block `aa3` above. It asks the node to `listsinceblock aa3`. But at some point prior the main chain switched to the bb chain. Previously: listsinceblock would find height=4 for block aa3 and compare this to height=5 for the tip of the chain (bb4). It would then return results restricted to bb3-bb4. Now: listsinceblock finds the fork at ab0 and returns results in the range bb1-bb4. This test only checks that [tx0] is present. 
''' self.log.info("Test reorg") # Split network into two self.split_network() # send to nodes[0] from nodes[2] senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) # generate on both sides nodes1_last_blockhash = self.nodes[1].generate(6)[-1] nodes2_first_blockhash = self.nodes[2].generate(7)[0] self.log.debug("nodes[1] last blockhash = {}".format(nodes1_last_blockhash)) self.log.debug("nodes[2] first blockhash = {}".format(nodes2_first_blockhash)) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) self.join_network() # listsinceblock(nodes1_last_blockhash) should now include tx as seen from nodes[0] # and return the block height which listsinceblock now exposes since a5e7795. transactions = self.nodes[0].listsinceblock(nodes1_last_blockhash)['transactions'] found = next(tx for tx in transactions if tx['txid'] == senttx) assert_equal(found['blockheight'], self.nodes[0].getblockheader(nodes2_first_blockhash)['height']) def test_double_spend(self): ''' This tests the case where the same UTXO is spent twice on two separate blocks as part of a reorg. ab0 / \ aa1 [tx1] bb1 [tx2] | | aa2 bb2 | | aa3 bb3 | bb4 Problematic case: 1. User 1 receives BCC in tx1 from utxo1 in block aa1. 2. User 2 receives BCC in tx2 from utxo1 (same) in block bb1 3. User 1 sees 2 confirmations at block aa3. 4. Reorg into bb chain. 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now invalidated. Currently the solution to this is to detect that a reorg'd block is asked for in listsinceblock, and to iterate back over existing blocks up until the fork point, and to include all transactions that relate to the node wallet. ''' self.log.info("Test double spend") self.sync_all() # Split network into two self.split_network() # share utxo between nodes[1] and nodes[2] utxos = self.nodes[2].listunspent() utxo = utxos[0] privkey = self.nodes[2].dumpprivkey(utxo['address']) self.nodes[1].importprivkey(privkey) # send from nodes[1] using utxo to nodes[0] change = '%.8f' % (float(utxo['amount']) - 1.0003) recipient_dict = { self.nodes[0].getnewaddress(): 1, self.nodes[1].getnewaddress(): change, } utxo_dicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] txid1 = self.nodes[1].sendrawtransaction( self.nodes[1].signrawtransactionwithwallet( self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex']) # send from nodes[2] using utxo to nodes[3] recipient_dict2 = { self.nodes[3].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } self.nodes[2].sendrawtransaction( self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex']) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(4) self.join_network() self.sync_all() # gettransaction should work for txid1 assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1" # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # but it should not include 'removed' if include_removed=false lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False) assert 'removed' not in lsbres2 def test_double_send(self): ''' This tests the case where the same transaction is submitted twice on two separate blocks as part of a reorg. The former will vanish and the latter will appear as the true transaction (with confirmations dropping as a result). 
ab0 / \ aa1 [tx1] bb1 | | aa2 bb2 | | aa3 bb3 [tx1] | bb4 Asserted: 1. tx1 is listed in listsinceblock. 2. It is included in 'removed' as it was removed, even though it is now present in a different block. 3. It is listed with a confirmation count of 2 (bb3, bb4), not 3 (aa1, aa2, aa3). ''' self.log.info("Test double send") self.sync_all() # Split network into two self.split_network() # create and sign a transaction utxos = self.nodes[2].listunspent() utxo = utxos[0] change = '%.8f' % (float(utxo['amount']) - 1.0003) recipient_dict = { self.nodes[0].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } utxo_dicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] signedtxres = self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict)) assert signedtxres['complete'] signedtx = signedtxres['hex'] # send from nodes[1]; this will end up in aa1 txid1 = self.nodes[1].sendrawtransaction(signedtx) # generate bb1-bb2 on right side self.nodes[2].generate(2) # send from nodes[2]; this will end up in bb3 txid2 = self.nodes[2].sendrawtransaction(signedtx) assert_equal(txid1, txid2) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(2) self.join_network() self.sync_all() # gettransaction should work for txid1 tx1 = self.nodes[0].gettransaction(txid1) assert_equal(tx1['blockheight'], self.nodes[0].getblockheader(tx1['blockhash'])['height']) # listsinceblock(lastblockhash) should now include txid1 in transactions # as well as in removed lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # find transaction and ensure confirmations is valid for tx in lsbres['transactions']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) # the same check for the removed array; confirmations should STILL be 2 for tx in lsbres['removed']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) def double_spends_filtered(self): ''' `listsinceblock` was returning conflicted transactions even if they occurred before the specified cutoff blockhash ''' self.log.info("Test spends filtered") spending_node = self.nodes[2] dest_address = spending_node.getnewaddress() tx_input = dict( sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in spending_node.listunspent())) rawtx = spending_node.createrawtransaction( [tx_input], {dest_address: tx_input["amount"] - Decimal("0.00051000"), spending_node.getrawchangeaddress(): Decimal("0.00050000")}) signedtx = spending_node.signrawtransactionwithwallet(rawtx) orig_tx_id = spending_node.sendrawtransaction(signedtx["hex"]) original_tx = spending_node.gettransaction(orig_tx_id) double_tx = spending_node.bumpfee(orig_tx_id) # check that both transactions exist block_hash = spending_node.listsinceblock( spending_node.getblockhash(spending_node.getblockcount())) original_found = False double_found = False for tx in block_hash['transactions']: if tx['txid'] == original_tx['txid']: original_found = True if tx['txid'] == double_tx['txid']: double_found = True assert_equal(original_found, True) assert_equal(double_found, True) lastblockhash = spending_node.generate(1)[0] # check that neither transaction exists block_hash = spending_node.listsinceblock(lastblockhash) original_found = False double_found = False for tx in block_hash['transactions']: if tx['txid'] == original_tx['txid']: original_found = True if tx['txid'] == double_tx['txid']: double_found = True 
assert_equal(original_found, False) assert_equal(double_found, False) if __name__ == '__main__': ListSinceBlockTest().main()
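# The reorg docstrings above describe listsinceblock walking back from a stale
# block hash to the fork point with the active chain. A rough illustration of
# that walk in terms of node RPCs already used in this test (getblockheader
# reports confirmations == -1 for blocks no longer on the active chain); this
# is a sketch of the idea only, not the wallet's actual implementation.
def find_fork_point(node, blockhash):
    header = node.getblockheader(blockhash)
    while header['confirmations'] < 0:
        blockhash = header['previousblockhash']
        header = node.getblockheader(blockhash)
    return blockhash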
the-stack_0_8132
# -*- coding: utf-8 -*- """ spectrum """ # import standard libraries import os from colour.colorimetry.spectrum import MultiSpectralDistributions from colour.models.rgb.datasets import srgb # import third party libraries import numpy as np from colour import SpectralShape, XYZ_to_RGB, XYZ_to_xyY from colour.models import RGB_COLOURSPACE_BT709 from sympy import Symbol, diff from colour.utilities import tstack # import my libraries import plot_utility as pu import spectrum_calculation as scl from spectrum_calculation import VALID_WAVELENGTH_ST, VALID_WAVELENGTH_ED,\ REFRECT_100P_SD import color_space as cs import test_pattern_generator2 as tpg import transfer_functions as tf # information __author__ = 'Toru Yoshihara' __copyright__ = 'Copyright (C) 2021 - Toru Yoshihara' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Toru Yoshihara' __email__ = 'toru.ver.11 at-sign gmail.com' __all__ = [] def load_camera_spectral_sensitivity_database(): sony_ss = scl.get_sony_nex5_ss() fig, ax1 = pu.plot_1_graph( fontsize=18, figsize=(10, 6), bg_color=(0.96, 0.96, 0.96), graph_title="SONY NEX-5N", graph_title_size=None, xlabel="Wavelength [nm]", ylabel="???", axis_label_size=None, legend_size=14, xlim=[380, 730], ylim=None, xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=2, minor_xtick_num=None, minor_ytick_num=None) ax1.plot( sony_ss.wavelengths, sony_ss.values[..., 0], label="R", color=pu.RED, alpha=1.0) ax1.plot( sony_ss.wavelengths, sony_ss.values[..., 1], label="G", color=pu.GREEN, alpha=1.0) ax1.plot( sony_ss.wavelengths, sony_ss.values[..., 2], label="B", color=pu.BLUE, alpha=1.0) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="./img/sony_ssd.png") # pu.show_and_save( # fig=fig, legend_loc='upper right', save_fname=None) def plot_camera_gamut(): sony_ss = scl.get_sony_nex5_ss() sony_csd = scl.CameraSpectralDistribution(sony_ss) primaries, white = sony_csd.calc_primary_xyY_and_white_xyY() print(primaries) print(white) fig, ax1 = pu.plot_1_graph( fontsize=18, figsize=(10, 10), bg_color=(0.96, 0.96, 0.96), graph_title="SONY NEX-5N", graph_title_size=None, xlabel="x", ylabel="y", axis_label_size=None, legend_size=14, xlim=None, ylim=None, xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=2, minor_xtick_num=None, minor_ytick_num=None) ax1.plot(primaries[..., 0], primaries[..., 1], label="Gamut") ax1.plot(white[0], white[1], 'x', label="Gamut", ms=10, mew=3) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="./img/sony_gamut.png") def debug_least_square_method(): var_str_list = [ ['m11', 'm12', 'm13'], ['m21', 'm22', 'm23'], ['m31', 'm32', 'm33']] mtx = [[Symbol(var_str_list[i][j]) for j in range(3)] for i in range(3)] xx = Symbol('xx') yy = Symbol('yy') zz = Symbol('zz') rr = Symbol('rr') gg = Symbol('gg') bb = Symbol('bb') jr = (xx - (mtx[0][0] * rr + mtx[0][1] * gg + mtx[0][2] * bb)) ** 2 jg = (yy - (mtx[1][0] * rr + mtx[1][1] * gg + mtx[1][2] * bb)) ** 2 jb = (zz - (mtx[2][0] * rr + mtx[2][1] * gg + mtx[2][2] * bb)) ** 2 jj = jr + jg + jb m11_diff = diff(jr, mtx[0][0]) m12_diff = diff(jr, mtx[0][1]) m13_diff = diff(jr, mtx[0][2]) print(m11_diff) print(m12_diff) print(m13_diff) def debug_cct_matrix(): color_temp = 6504 light_sd = scl.calc_illuminant_d_spectrum(color_temp) color_checker_sd = scl.load_color_checker_spectrum() camera_ss = scl.get_sony_nex5_ss() cmfs = scl.get_cie_2_1931_cmf() cct_matrix = scl.calc_cct_matrix_from_color_checker(camera_ss=camera_ss) camera_rgb = 
scl.calc_tristimulus_values_from_multi_spectrum( src_sd=light_sd, ref_sd=color_checker_sd, ss=camera_ss) measure_xyz = scl.calc_xyz_from_multi_spectrum( src_sd=light_sd, ref_sd=color_checker_sd, cmfs=cmfs) print(cct_matrix) camera_xyz_using_mtx = scl.apply_matrix(src=camera_rgb, mtx=cct_matrix) true_rgb = XYZ_to_RGB( measure_xyz, cs.D65, cs.D65, RGB_COLOURSPACE_BT709.matrix_XYZ_to_RGB) estimated_rgb = XYZ_to_RGB( camera_xyz_using_mtx, cs.D65, cs.D65, RGB_COLOURSPACE_BT709.matrix_XYZ_to_RGB) true_rgb_srgb = tf.oetf(np.clip(true_rgb, 0.0, 1.0), tf.SRGB) est_rgb_srgb = tf.oetf(np.clip(estimated_rgb, 0.0, 1.0), tf.SRGB) img = tpg.plot_color_checker_image( rgb=true_rgb_srgb, rgb2=est_rgb_srgb) tpg.img_wirte_float_as_16bit_int("./img/cct_mtx.png", img) # primaries xmin = 0.0 xmax = 0.8 ymin = -0.4 ymax = 1.2 primary_rgb = np.array([ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 1, 1]]) primary_xyz = scl.apply_matrix(primary_rgb, cct_matrix) primary_xyY = XYZ_to_xyY(primary_xyz) bt709_gamut, _ = tpg.get_primaries(name=cs.BT709) bt2020_gamut, _ = tpg.get_primaries(name=cs.BT2020) dci_p3_gamut, _ = tpg.get_primaries(name=cs.P3_D65) xy_image = tpg.get_chromaticity_image( xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(8, 14), bg_color=(0.96, 0.96, 0.96), graph_title="Chromaticity Diagram?", graph_title_size=None, xlabel="x", ylabel="y", axis_label_size=None, legend_size=17, xlim=[xmin, xmax], ylim=[ymin, ymax], xtick=[0.1 * x for x in range(9)], ytick=[0.1 * x - 0.4 for x in range(17)], xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None) cmf_xy = tpg._get_cmfs_xy() ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', label=None) ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1], c=pu.RED, label="BT.709", lw=2, alpha=0.8) ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1], c=pu.YELLOW, label="BT.2020", lw=2, alpha=0.8) ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1], c=pu.BLUE, label="DCI-P3", lw=2, alpha=0.8) ax1.plot( (cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]), '-k', label=None) ax1.plot( primary_xyY[:4, 0], primary_xyY[:4, 1], color='k', label="SONY NEX-5N") ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax)) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="img/camera_chroma_test.png") def calc_camera_gamut_from_ss(): color_temp = 6504 light_sd = scl.REFRECT_100P_SD camera_ss = scl.get_sony_nex5_ss() cmfs = scl.get_cie_2_1931_cmf() cr = camera_ss.values[..., 0] cg = camera_ss.values[..., 1] cb = camera_ss.values[..., 2] rr = cmfs.values[..., 0] gg = cmfs.values[..., 1] bb = cmfs.values[..., 2] r_base = cr - cr*cg - cr*cb g_base = cg - cg*cr - cg*cb b_base = cb - cb*cr - cb*cg rx = np.sum(r_base * rr) ry = np.sum(r_base * gg) rz = np.sum(r_base * bb) gx = np.sum(g_base * rr) gy = np.sum(g_base * gg) gz = np.sum(g_base * bb) bx = np.sum(b_base * rr) by = np.sum(b_base * gg) bz = np.sum(b_base * bb) r_xyY = XYZ_to_xyY(tstack([rx, ry, rz])) g_xyY = XYZ_to_xyY(tstack([gx, gy, gz])) b_xyY = XYZ_to_xyY(tstack([bx, by, bz])) print(r_xyY) print(g_xyY) print(b_xyY) def plot_camera_capture_xy_value(): wavelengths = REFRECT_100P_SD.wavelengths cmfs = scl.get_cie_2_1931_cmf() length = len(wavelengths) spectrum_array = np.zeros((length, length)) for idx in range(length): spectrum_array[idx, idx] = 1 data = dict(zip(wavelengths, spectrum_array)) src_sd = MultiSpectralDistributions(data=data) camera_ss = scl.get_sony_nex5_ss() camera_rgb = scl.calc_tristimulus_values_from_multi_spectrum( 
src_sd=REFRECT_100P_SD, ref_sd=src_sd, ss=camera_ss) cct_matrix = scl.calc_cct_matrix_from_color_checker(camera_ss=camera_ss) camera_xyz_using_mtx = scl.apply_matrix(src=camera_rgb, mtx=cct_matrix) camera_xyY = XYZ_to_xyY(camera_xyz_using_mtx) # ok_idx = camera_xyY[..., 2] != 0 ok_idx = (wavelengths >= 400) & (wavelengths <= 720) ok_wavelength = wavelengths[ok_idx] ok_xyY = camera_xyY[ok_idx] linear_rgb_from_line_spectrum = scl.calc_linear_rgb_from_spectrum( src_sd=REFRECT_100P_SD, ref_sd=src_sd, cmfs=cmfs, color_space=RGB_COLOURSPACE_BT709) linear_rgb_from_line_spectrum = linear_rgb_from_line_spectrum[ok_idx] linear_rgb_from_line_spectrum =\ linear_rgb_from_line_spectrum / np.max(linear_rgb_from_line_spectrum, -1)[0] # primaries xmin = 0.0 xmax = 0.8 ymin = -0.4 ymax = 1.2 primary_rgb = np.array([ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 1, 1]]) primary_xyz = scl.apply_matrix(primary_rgb, cct_matrix) primary_xyY = XYZ_to_xyY(primary_xyz) bt709_gamut, _ = tpg.get_primaries(name=cs.BT709) bt2020_gamut, _ = tpg.get_primaries(name=cs.BT2020) dci_p3_gamut, _ = tpg.get_primaries(name=cs.P3_D65) xy_image = tpg.get_chromaticity_image( xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(8, 14), bg_color=(0.96, 0.96, 0.96), graph_title="Chromaticity Diagram?", graph_title_size=None, xlabel="x", ylabel="y", axis_label_size=None, legend_size=17, xlim=[xmin, xmax], ylim=[ymin, ymax], xtick=[0.1 * x for x in range(9)], ytick=[0.1 * x - 0.4 for x in range(17)], xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None) cmf_xy = tpg._get_cmfs_xy() ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', label=None) ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1], c=pu.RED, label="BT.709", lw=2, alpha=0.8) ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1], c=pu.YELLOW, label="BT.2020", lw=2, alpha=0.8) ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1], c=pu.BLUE, label="DCI-P3", lw=2, alpha=0.8) ax1.plot( (cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]), '-k', label=None) ax1.plot( primary_xyY[:4, 0], primary_xyY[:4, 1], color='k', label="SONY NEX-5N") ax1.scatter( ok_xyY[..., 0], ok_xyY[..., 1], label="monochromatic light", edgecolors=None, c=(0.4, 0.4, 0.4) ) ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax)) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="img/camera_chroma_with_line_spectrum.png") if __name__ == '__main__': os.chdir(os.path.dirname(os.path.abspath(__file__))) # load_camera_spectral_sensitivity_database() # plot_camera_gamut() # debug_least_square_method() # debug_cct_matrix() # calc_camera_gamut_from_ss() plot_camera_capture_xy_value()
the-stack_0_8133
from bisect import bisect_left from bisect import bisect_right from contextlib import contextmanager from copy import deepcopy from functools import wraps from inspect import isclass import calendar import collections import datetime import decimal import hashlib import itertools import logging import operator import re import socket import struct import sys import threading import time import uuid import warnings try: from collections.abc import Mapping except ImportError: from collections import Mapping try: from pysqlite3 import dbapi2 as pysq3 except ImportError: try: from pysqlite2 import dbapi2 as pysq3 except ImportError: pysq3 = None try: import sqlite3 except ImportError: sqlite3 = pysq3 else: if pysq3 and pysq3.sqlite_version_info >= sqlite3.sqlite_version_info: sqlite3 = pysq3 try: from psycopg2cffi import compat compat.register() except ImportError: pass try: import psycopg2 from psycopg2 import extensions as pg_extensions try: from psycopg2 import errors as pg_errors except ImportError: pg_errors = None except ImportError: psycopg2 = pg_errors = None try: from psycopg2.extras import register_uuid as pg_register_uuid pg_register_uuid() except Exception: pass mysql_passwd = False try: import pymysql as mysql except ImportError: try: import MySQLdb as mysql mysql_passwd = True except ImportError: mysql = None __version__ = '3.14.4' __all__ = [ 'AsIs', 'AutoField', 'BareField', 'BigAutoField', 'BigBitField', 'BigIntegerField', 'BinaryUUIDField', 'BitField', 'BlobField', 'BooleanField', 'Case', 'Cast', 'CharField', 'Check', 'chunked', 'Column', 'CompositeKey', 'Context', 'Database', 'DatabaseError', 'DatabaseProxy', 'DataError', 'DateField', 'DateTimeField', 'DecimalField', 'DeferredForeignKey', 'DeferredThroughModel', 'DJANGO_MAP', 'DoesNotExist', 'DoubleField', 'DQ', 'EXCLUDED', 'Field', 'FixedCharField', 'FloatField', 'fn', 'ForeignKeyField', 'IdentityField', 'ImproperlyConfigured', 'Index', 'IntegerField', 'IntegrityError', 'InterfaceError', 'InternalError', 'IPField', 'JOIN', 'ManyToManyField', 'Model', 'ModelIndex', 'MySQLDatabase', 'NotSupportedError', 'OP', 'OperationalError', 'PostgresqlDatabase', 'PrimaryKeyField', # XXX: Deprecated, change to AutoField. 
'prefetch', 'ProgrammingError', 'Proxy', 'QualifiedNames', 'SchemaManager', 'SmallIntegerField', 'Select', 'SQL', 'SqliteDatabase', 'Table', 'TextField', 'TimeField', 'TimestampField', 'Tuple', 'UUIDField', 'Value', 'ValuesList', 'Window', ] try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logger = logging.getLogger('peewee') logger.addHandler(NullHandler()) if sys.version_info[0] == 2: text_type = unicode bytes_type = str buffer_type = buffer izip_longest = itertools.izip_longest callable_ = callable multi_types = (list, tuple, frozenset, set) exec('def reraise(tp, value, tb=None): raise tp, value, tb') def print_(s): sys.stdout.write(s) sys.stdout.write('\n') else: import builtins try: from collections.abc import Callable except ImportError: from collections import Callable from functools import reduce callable_ = lambda c: isinstance(c, Callable) text_type = str bytes_type = bytes buffer_type = memoryview basestring = str long = int multi_types = (list, tuple, frozenset, set, range) print_ = getattr(builtins, 'print') izip_longest = itertools.zip_longest def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value if sqlite3: sqlite3.register_adapter(decimal.Decimal, str) sqlite3.register_adapter(datetime.date, str) sqlite3.register_adapter(datetime.time, str) __sqlite_version__ = sqlite3.sqlite_version_info else: __sqlite_version__ = (0, 0, 0) __date_parts__ = set(('year', 'month', 'day', 'hour', 'minute', 'second')) # Sqlite does not support the `date_part` SQL function, so we will define an # implementation in python. __sqlite_datetime_formats__ = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d', '%H:%M:%S', '%H:%M:%S.%f', '%H:%M') __sqlite_date_trunc__ = { 'year': '%Y-01-01 00:00:00', 'month': '%Y-%m-01 00:00:00', 'day': '%Y-%m-%d 00:00:00', 'hour': '%Y-%m-%d %H:00:00', 'minute': '%Y-%m-%d %H:%M:00', 'second': '%Y-%m-%d %H:%M:%S'} __mysql_date_trunc__ = __sqlite_date_trunc__.copy() __mysql_date_trunc__['minute'] = '%Y-%m-%d %H:%i:00' __mysql_date_trunc__['second'] = '%Y-%m-%d %H:%i:%S' def _sqlite_date_part(lookup_type, datetime_string): assert lookup_type in __date_parts__ if not datetime_string: return dt = format_date_time(datetime_string, __sqlite_datetime_formats__) return getattr(dt, lookup_type) def _sqlite_date_trunc(lookup_type, datetime_string): assert lookup_type in __sqlite_date_trunc__ if not datetime_string: return dt = format_date_time(datetime_string, __sqlite_datetime_formats__) return dt.strftime(__sqlite_date_trunc__[lookup_type]) def __deprecated__(s): warnings.warn(s, DeprecationWarning) class attrdict(dict): def __getattr__(self, attr): try: return self[attr] except KeyError: raise AttributeError(attr) def __setattr__(self, attr, value): self[attr] = value def __iadd__(self, rhs): self.update(rhs); return self def __add__(self, rhs): d = attrdict(self); d.update(rhs); return d SENTINEL = object() #: Operations for use in SQL expressions. OP = attrdict( AND='AND', OR='OR', ADD='+', SUB='-', MUL='*', DIV='/', BIN_AND='&', BIN_OR='|', XOR='#', MOD='%', EQ='=', LT='<', LTE='<=', GT='>', GTE='>=', NE='!=', IN='IN', NOT_IN='NOT IN', IS='IS', IS_NOT='IS NOT', LIKE='LIKE', ILIKE='ILIKE', BETWEEN='BETWEEN', REGEXP='REGEXP', IREGEXP='IREGEXP', CONCAT='||', BITWISE_NEGATION='~') # To support "django-style" double-underscore filters, create a mapping between # operation name and operation code, e.g. "__eq" == OP.EQ. 
DJANGO_MAP = attrdict({ 'eq': operator.eq, 'lt': operator.lt, 'lte': operator.le, 'gt': operator.gt, 'gte': operator.ge, 'ne': operator.ne, 'in': operator.lshift, 'is': lambda l, r: Expression(l, OP.IS, r), 'like': lambda l, r: Expression(l, OP.LIKE, r), 'ilike': lambda l, r: Expression(l, OP.ILIKE, r), 'regexp': lambda l, r: Expression(l, OP.REGEXP, r), }) #: Mapping of field type to the data-type supported by the database. Databases #: may override or add to this list. FIELD = attrdict( AUTO='INTEGER', BIGAUTO='BIGINT', BIGINT='BIGINT', BLOB='BLOB', BOOL='SMALLINT', CHAR='CHAR', DATE='DATE', DATETIME='DATETIME', DECIMAL='DECIMAL', DEFAULT='', DOUBLE='REAL', FLOAT='REAL', INT='INTEGER', SMALLINT='SMALLINT', TEXT='TEXT', TIME='TIME', UUID='TEXT', UUIDB='BLOB', VARCHAR='VARCHAR') #: Join helpers (for convenience) -- all join types are supported, this object #: is just to help avoid introducing errors by using strings everywhere. JOIN = attrdict( INNER='INNER JOIN', LEFT_OUTER='LEFT OUTER JOIN', RIGHT_OUTER='RIGHT OUTER JOIN', FULL='FULL JOIN', FULL_OUTER='FULL OUTER JOIN', CROSS='CROSS JOIN', NATURAL='NATURAL JOIN', LATERAL='LATERAL', LEFT_LATERAL='LEFT JOIN LATERAL') # Row representations. ROW = attrdict( TUPLE=1, DICT=2, NAMED_TUPLE=3, CONSTRUCTOR=4, MODEL=5) SCOPE_NORMAL = 1 SCOPE_SOURCE = 2 SCOPE_VALUES = 4 SCOPE_CTE = 8 SCOPE_COLUMN = 16 # Rules for parentheses around subqueries in compound select. CSQ_PARENTHESES_NEVER = 0 CSQ_PARENTHESES_ALWAYS = 1 CSQ_PARENTHESES_UNNESTED = 2 # Regular expressions used to convert class names to snake-case table names. # First regex handles acronym followed by word or initial lower-word followed # by a capitalized word. e.g. APIResponse -> API_Response / fooBar -> foo_Bar. # Second regex handles the normal case of two title-cased words. SNAKE_CASE_STEP1 = re.compile('(.)_*([A-Z][a-z]+)') SNAKE_CASE_STEP2 = re.compile('([a-z0-9])_*([A-Z])') # Helper functions that are used in various parts of the codebase. MODEL_BASE = '_metaclass_helper_' def with_metaclass(meta, base=object): return meta(MODEL_BASE, (base,), {}) def merge_dict(source, overrides): merged = source.copy() if overrides: merged.update(overrides) return merged def quote(path, quote_chars): if len(path) == 1: return path[0].join(quote_chars) return '.'.join([part.join(quote_chars) for part in path]) is_model = lambda o: isclass(o) and issubclass(o, Model) def ensure_tuple(value): if value is not None: return value if isinstance(value, (list, tuple)) else (value,) def ensure_entity(value): if value is not None: return value if isinstance(value, Node) else Entity(value) def make_snake_case(s): first = SNAKE_CASE_STEP1.sub(r'\1_\2', s) return SNAKE_CASE_STEP2.sub(r'\1_\2', first).lower() def chunked(it, n): marker = object() for group in (list(g) for g in izip_longest(*[iter(it)] * n, fillvalue=marker)): if group[-1] is marker: del group[group.index(marker):] yield group class _callable_context_manager(object): def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with self: return fn(*args, **kwargs) return inner class Proxy(object): """ Create a proxy or placeholder for another object. 
""" __slots__ = ('obj', '_callbacks') def __init__(self): self._callbacks = [] self.initialize(None) def initialize(self, obj): self.obj = obj for callback in self._callbacks: callback(obj) def attach_callback(self, callback): self._callbacks.append(callback) return callback def passthrough(method): def inner(self, *args, **kwargs): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, method)(*args, **kwargs) return inner # Allow proxy to be used as a context-manager. __enter__ = passthrough('__enter__') __exit__ = passthrough('__exit__') def __getattr__(self, attr): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, attr) def __setattr__(self, attr, value): if attr not in self.__slots__: raise AttributeError('Cannot set attribute on proxy.') return super(Proxy, self).__setattr__(attr, value) class DatabaseProxy(Proxy): """ Proxy implementation specifically for proxying `Database` objects. """ def connection_context(self): return ConnectionContext(self) def atomic(self, *args, **kwargs): return _atomic(self, *args, **kwargs) def manual_commit(self): return _manual(self) def transaction(self, *args, **kwargs): return _transaction(self, *args, **kwargs) def savepoint(self): return _savepoint(self) class ModelDescriptor(object): pass # SQL Generation. class AliasManager(object): __slots__ = ('_counter', '_current_index', '_mapping') def __init__(self): # A list of dictionaries containing mappings at various depths. self._counter = 0 self._current_index = 0 self._mapping = [] self.push() @property def mapping(self): return self._mapping[self._current_index - 1] def add(self, source): if source not in self.mapping: self._counter += 1 self[source] = 't%d' % self._counter return self.mapping[source] def get(self, source, any_depth=False): if any_depth: for idx in reversed(range(self._current_index)): if source in self._mapping[idx]: return self._mapping[idx][source] return self.add(source) def __getitem__(self, source): return self.get(source) def __setitem__(self, source, alias): self.mapping[source] = alias def push(self): self._current_index += 1 if self._current_index > len(self._mapping): self._mapping.append({}) def pop(self): if self._current_index == 1: raise ValueError('Cannot pop() from empty alias manager.') self._current_index -= 1 class State(collections.namedtuple('_State', ('scope', 'parentheses', 'settings'))): def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, **kwargs): return super(State, cls).__new__(cls, scope, parentheses, kwargs) def __call__(self, scope=None, parentheses=None, **kwargs): # Scope and settings are "inherited" (parentheses is not, however). scope = self.scope if scope is None else scope # Try to avoid unnecessary dict copying. if kwargs and self.settings: settings = self.settings.copy() # Copy original settings dict. settings.update(kwargs) # Update copy with overrides. 
elif kwargs: settings = kwargs else: settings = self.settings return State(scope, parentheses, **settings) def __getattr__(self, attr_name): return self.settings.get(attr_name) def __scope_context__(scope): @contextmanager def inner(self, **kwargs): with self(scope=scope, **kwargs): yield self return inner class Context(object): __slots__ = ('stack', '_sql', '_values', 'alias_manager', 'state') def __init__(self, **settings): self.stack = [] self._sql = [] self._values = [] self.alias_manager = AliasManager() self.state = State(**settings) def as_new(self): return Context(**self.state.settings) def column_sort_key(self, item): return item[0].get_sort_key(self) @property def scope(self): return self.state.scope @property def parentheses(self): return self.state.parentheses @property def subquery(self): return self.state.subquery def __call__(self, **overrides): if overrides and overrides.get('scope') == self.scope: del overrides['scope'] self.stack.append(self.state) self.state = self.state(**overrides) return self scope_normal = __scope_context__(SCOPE_NORMAL) scope_source = __scope_context__(SCOPE_SOURCE) scope_values = __scope_context__(SCOPE_VALUES) scope_cte = __scope_context__(SCOPE_CTE) scope_column = __scope_context__(SCOPE_COLUMN) def __enter__(self): if self.parentheses: self.literal('(') return self def __exit__(self, exc_type, exc_val, exc_tb): if self.parentheses: self.literal(')') self.state = self.stack.pop() @contextmanager def push_alias(self): self.alias_manager.push() yield self.alias_manager.pop() def sql(self, obj): if isinstance(obj, (Node, Context)): return obj.__sql__(self) elif is_model(obj): return obj._meta.table.__sql__(self) else: return self.sql(Value(obj)) def literal(self, keyword): self._sql.append(keyword) return self def value(self, value, converter=None, add_param=True): if converter: value = converter(value) elif converter is None and self.state.converter: # Explicitly check for None so that "False" can be used to signify # that no conversion should be applied. value = self.state.converter(value) if isinstance(value, Node): with self(converter=None): return self.sql(value) elif is_model(value): # Under certain circumstances, we could end-up treating a model- # class itself as a value. This check ensures that we drop the # table alias into the query instead of trying to parameterize a # model (for instance, passing a model as a function argument). with self.scope_column(): return self.sql(value) self._values.append(value) return self.literal(self.state.param or '?') if add_param else self def __sql__(self, ctx): ctx._sql.extend(self._sql) ctx._values.extend(self._values) return ctx def parse(self, node): return self.sql(node).query() def query(self): return ''.join(self._sql), self._values def query_to_string(query): # NOTE: this function is not exported by default as it might be misused -- # and this misuse could lead to sql injection vulnerabilities. This # function is intended for debugging or logging purposes ONLY. db = getattr(query, '_database', None) if db is not None: ctx = db.get_sql_context() else: ctx = Context() sql, params = ctx.sql(query).query() if not params: return sql param = ctx.state.param or '?' if param == '?': sql = sql.replace('?', '%s') return sql % tuple(map(_query_val_transform, params)) def _query_val_transform(v): # Interpolate parameters. 
if isinstance(v, (text_type, datetime.datetime, datetime.date, datetime.time)): v = "'%s'" % v elif isinstance(v, bytes_type): try: v = v.decode('utf8') except UnicodeDecodeError: v = v.decode('raw_unicode_escape') v = "'%s'" % v elif isinstance(v, int): v = '%s' % int(v) # Also handles booleans -> 1 or 0. elif v is None: v = 'NULL' else: v = str(v) return v # AST. class Node(object): _coerce = True def clone(self): obj = self.__class__.__new__(self.__class__) obj.__dict__ = self.__dict__.copy() return obj def __sql__(self, ctx): raise NotImplementedError @staticmethod def copy(method): def inner(self, *args, **kwargs): clone = self.clone() method(clone, *args, **kwargs) return clone return inner def coerce(self, _coerce=True): if _coerce != self._coerce: clone = self.clone() clone._coerce = _coerce return clone return self def is_alias(self): return False def unwrap(self): return self class ColumnFactory(object): __slots__ = ('node',) def __init__(self, node): self.node = node def __getattr__(self, attr): return Column(self.node, attr) class _DynamicColumn(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: return ColumnFactory(instance) # Implements __getattr__(). return self class _ExplicitColumn(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: raise AttributeError( '%s specifies columns explicitly, and does not support ' 'dynamic column lookups.' % instance) return self class Source(Node): c = _DynamicColumn() def __init__(self, alias=None): super(Source, self).__init__() self._alias = alias @Node.copy def alias(self, name): self._alias = name def select(self, *columns): if not columns: columns = (SQL('*'),) return Select((self,), columns) def join(self, dest, join_type=JOIN.INNER, on=None): return Join(self, dest, join_type, on) def left_outer_join(self, dest, on=None): return Join(self, dest, JOIN.LEFT_OUTER, on) def cte(self, name, recursive=False, columns=None, materialized=None): return CTE(name, self, recursive=recursive, columns=columns, materialized=materialized) def get_sort_key(self, ctx): if self._alias: return (self._alias,) return (ctx.alias_manager[self],) def apply_alias(self, ctx): # If we are defining the source, include the "AS alias" declaration. An # alias is created for the source if one is not already defined. 
if ctx.scope == SCOPE_SOURCE: if self._alias: ctx.alias_manager[self] = self._alias ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self])) return ctx def apply_column(self, ctx): if self._alias: ctx.alias_manager[self] = self._alias return ctx.sql(Entity(ctx.alias_manager[self])) class _HashableSource(object): def __init__(self, *args, **kwargs): super(_HashableSource, self).__init__(*args, **kwargs) self._update_hash() @Node.copy def alias(self, name): self._alias = name self._update_hash() def _update_hash(self): self._hash = self._get_hash() def _get_hash(self): return hash((self.__class__, self._path, self._alias)) def __hash__(self): return self._hash def __eq__(self, other): if isinstance(other, _HashableSource): return self._hash == other._hash return Expression(self, OP.EQ, other) def __ne__(self, other): if isinstance(other, _HashableSource): return self._hash != other._hash return Expression(self, OP.NE, other) def _e(op): def inner(self, rhs): return Expression(self, op, rhs) return inner __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) def __bind_database__(meth): @wraps(meth) def inner(self, *args, **kwargs): result = meth(self, *args, **kwargs) if self._database: return result.bind(self._database) return result return inner def __join__(join_type=JOIN.INNER, inverted=False): def method(self, other): if inverted: self, other = other, self return Join(self, other, join_type=join_type) return method class BaseTable(Source): __and__ = __join__(JOIN.INNER) __add__ = __join__(JOIN.LEFT_OUTER) __sub__ = __join__(JOIN.RIGHT_OUTER) __or__ = __join__(JOIN.FULL_OUTER) __mul__ = __join__(JOIN.CROSS) __rand__ = __join__(JOIN.INNER, inverted=True) __radd__ = __join__(JOIN.LEFT_OUTER, inverted=True) __rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True) __ror__ = __join__(JOIN.FULL_OUTER, inverted=True) __rmul__ = __join__(JOIN.CROSS, inverted=True) class _BoundTableContext(_callable_context_manager): def __init__(self, table, database): self.table = table self.database = database def __enter__(self): self._orig_database = self.table._database self.table.bind(self.database) if self.table._model is not None: self.table._model.bind(self.database) return self.table def __exit__(self, exc_type, exc_val, exc_tb): self.table.bind(self._orig_database) if self.table._model is not None: self.table._model.bind(self._orig_database) class Table(_HashableSource, BaseTable): def __init__(self, name, columns=None, primary_key=None, schema=None, alias=None, _model=None, _database=None): self.__name__ = name self._columns = columns self._primary_key = primary_key self._schema = schema self._path = (schema, name) if schema else (name,) self._model = _model self._database = _database super(Table, self).__init__(alias=alias) # Allow tables to restrict what columns are available. if columns is not None: self.c = _ExplicitColumn() for column in columns: setattr(self, column, Column(self, column)) if primary_key: col_src = self if self._columns else self.c self.primary_key = getattr(col_src, primary_key) else: self.primary_key = None def clone(self): # Ensure a deep copy of the column instances. 
return Table( self.__name__, columns=self._columns, primary_key=self._primary_key, schema=self._schema, alias=self._alias, _model=self._model, _database=self._database) def bind(self, database=None): self._database = database return self def bind_ctx(self, database=None): return _BoundTableContext(self, database) def _get_hash(self): return hash((self.__class__, self._path, self._alias, self._model)) @__bind_database__ def select(self, *columns): if not columns and self._columns: columns = [Column(self, column) for column in self._columns] return Select((self,), columns) @__bind_database__ def insert(self, insert=None, columns=None, **kwargs): if kwargs: insert = {} if insert is None else insert src = self if self._columns else self.c for key, value in kwargs.items(): insert[getattr(src, key)] = value return Insert(self, insert=insert, columns=columns) @__bind_database__ def replace(self, insert=None, columns=None, **kwargs): return (self .insert(insert=insert, columns=columns) .on_conflict('REPLACE')) @__bind_database__ def update(self, update=None, **kwargs): if kwargs: update = {} if update is None else update for key, value in kwargs.items(): src = self if self._columns else self.c update[getattr(src, key)] = value return Update(self, update=update) @__bind_database__ def delete(self): return Delete(self) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: # Return the quoted table name. return ctx.sql(Entity(*self._path)) if self._alias: ctx.alias_manager[self] = self._alias if ctx.scope == SCOPE_SOURCE: # Define the table and its alias. return self.apply_alias(ctx.sql(Entity(*self._path))) else: # Refer to the table using the alias. return self.apply_column(ctx) class Join(BaseTable): def __init__(self, lhs, rhs, join_type=JOIN.INNER, on=None, alias=None): super(Join, self).__init__(alias=alias) self.lhs = lhs self.rhs = rhs self.join_type = join_type self._on = on def on(self, predicate): self._on = predicate return self def __sql__(self, ctx): (ctx .sql(self.lhs) .literal(' %s ' % self.join_type) .sql(self.rhs)) if self._on is not None: ctx.literal(' ON ').sql(self._on) return ctx class ValuesList(_HashableSource, BaseTable): def __init__(self, values, columns=None, alias=None): self._values = values self._columns = columns super(ValuesList, self).__init__(alias=alias) def _get_hash(self): return hash((self.__class__, id(self._values), self._alias)) @Node.copy def columns(self, *names): self._columns = names def __sql__(self, ctx): if self._alias: ctx.alias_manager[self] = self._alias if ctx.scope == SCOPE_SOURCE or ctx.scope == SCOPE_NORMAL: with ctx(parentheses=not ctx.parentheses): ctx = (ctx .literal('VALUES ') .sql(CommaNodeList([ EnclosedNodeList(row) for row in self._values]))) if ctx.scope == SCOPE_SOURCE: ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self])) if self._columns: entities = [Entity(c) for c in self._columns] ctx.sql(EnclosedNodeList(entities)) else: ctx.sql(Entity(ctx.alias_manager[self])) return ctx class CTE(_HashableSource, Source): def __init__(self, name, query, recursive=False, columns=None, materialized=None): self._alias = name self._query = query self._recursive = recursive self._materialized = materialized if columns is not None: columns = [Entity(c) if isinstance(c, basestring) else c for c in columns] self._columns = columns query._cte_list = () super(CTE, self).__init__(alias=name) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns ' 'from the CTE to select.') query = 
(Select((self,), columns) .with_cte(self) .bind(self._query._database)) try: query = query.objects(self._query.model) except AttributeError: pass return query def _get_hash(self): return hash((self.__class__, self._alias, id(self._query))) def union_all(self, rhs): clone = self._query.clone() return CTE(self._alias, clone + rhs, self._recursive, self._columns) __add__ = union_all def union(self, rhs): clone = self._query.clone() return CTE(self._alias, clone | rhs, self._recursive, self._columns) __or__ = union def __sql__(self, ctx): if ctx.scope != SCOPE_CTE: return ctx.sql(Entity(self._alias)) with ctx.push_alias(): ctx.alias_manager[self] = self._alias ctx.sql(Entity(self._alias)) if self._columns: ctx.literal(' ').sql(EnclosedNodeList(self._columns)) ctx.literal(' AS ') if self._materialized: ctx.literal('MATERIALIZED ') elif self._materialized is False: ctx.literal('NOT MATERIALIZED ') with ctx.scope_normal(parentheses=True): ctx.sql(self._query) return ctx class ColumnBase(Node): _converter = None @Node.copy def converter(self, converter=None): self._converter = converter def alias(self, alias): if alias: return Alias(self, alias) return self def unalias(self): return self def cast(self, as_type): return Cast(self, as_type) def asc(self, collation=None, nulls=None): return Asc(self, collation=collation, nulls=nulls) __pos__ = asc def desc(self, collation=None, nulls=None): return Desc(self, collation=collation, nulls=nulls) __neg__ = desc def __invert__(self): return Negated(self) def _e(op, inv=False): """ Lightweight factory which returns a method that builds an Expression consisting of the left-hand and right-hand operands, using `op`. """ def inner(self, rhs): if inv: return Expression(rhs, op, self) return Expression(self, op, rhs) return inner __and__ = _e(OP.AND) __or__ = _e(OP.OR) __add__ = _e(OP.ADD) __sub__ = _e(OP.SUB) __mul__ = _e(OP.MUL) __div__ = __truediv__ = _e(OP.DIV) __xor__ = _e(OP.XOR) __radd__ = _e(OP.ADD, inv=True) __rsub__ = _e(OP.SUB, inv=True) __rmul__ = _e(OP.MUL, inv=True) __rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True) __rand__ = _e(OP.AND, inv=True) __ror__ = _e(OP.OR, inv=True) __rxor__ = _e(OP.XOR, inv=True) def __eq__(self, rhs): op = OP.IS if rhs is None else OP.EQ return Expression(self, op, rhs) def __ne__(self, rhs): op = OP.IS_NOT if rhs is None else OP.NE return Expression(self, op, rhs) __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) __lshift__ = _e(OP.IN) __rshift__ = _e(OP.IS) __mod__ = _e(OP.LIKE) __pow__ = _e(OP.ILIKE) like = _e(OP.LIKE) ilike = _e(OP.ILIKE) bin_and = _e(OP.BIN_AND) bin_or = _e(OP.BIN_OR) in_ = _e(OP.IN) not_in = _e(OP.NOT_IN) regexp = _e(OP.REGEXP) # Special expressions. 
def is_null(self, is_null=True): op = OP.IS if is_null else OP.IS_NOT return Expression(self, op, None) def _escape_like_expr(self, s, template): if s.find('_') >= 0 or s.find('%') >= 0 or s.find('\\') >= 0: s = s.replace('\\', '\\\\').replace('_', '\\_').replace('%', '\\%') return NodeList((template % s, SQL('ESCAPE'), '\\')) return template % s def contains(self, rhs): if isinstance(rhs, Node): rhs = Expression('%', OP.CONCAT, Expression(rhs, OP.CONCAT, '%')) else: rhs = self._escape_like_expr(rhs, '%%%s%%') return Expression(self, OP.ILIKE, rhs) def startswith(self, rhs): if isinstance(rhs, Node): rhs = Expression(rhs, OP.CONCAT, '%') else: rhs = self._escape_like_expr(rhs, '%s%%') return Expression(self, OP.ILIKE, rhs) def endswith(self, rhs): if isinstance(rhs, Node): rhs = Expression('%', OP.CONCAT, rhs) else: rhs = self._escape_like_expr(rhs, '%%%s') return Expression(self, OP.ILIKE, rhs) def between(self, lo, hi): return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi))) def concat(self, rhs): return StringExpression(self, OP.CONCAT, rhs) def regexp(self, rhs): return Expression(self, OP.REGEXP, rhs) def iregexp(self, rhs): return Expression(self, OP.IREGEXP, rhs) def __getitem__(self, item): if isinstance(item, slice): if item.start is None or item.stop is None: raise ValueError('BETWEEN range must have both a start- and ' 'end-point.') return self.between(item.start, item.stop) return self == item def distinct(self): return NodeList((SQL('DISTINCT'), self)) def collate(self, collation): return NodeList((self, SQL('COLLATE %s' % collation))) def get_sort_key(self, ctx): return () class Column(ColumnBase): def __init__(self, source, name): self.source = source self.name = name def get_sort_key(self, ctx): if ctx.scope == SCOPE_VALUES: return (self.name,) else: return self.source.get_sort_key(ctx) + (self.name,) def __hash__(self): return hash((self.source, self.name)) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: return ctx.sql(Entity(self.name)) else: with ctx.scope_column(): return ctx.sql(self.source).literal('.').sql(Entity(self.name)) class WrappedNode(ColumnBase): def __init__(self, node): self.node = node self._coerce = getattr(node, '_coerce', True) self._converter = getattr(node, '_converter', None) def is_alias(self): return self.node.is_alias() def unwrap(self): return self.node.unwrap() class EntityFactory(object): __slots__ = ('node',) def __init__(self, node): self.node = node def __getattr__(self, attr): return Entity(self.node, attr) class _DynamicEntity(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: return EntityFactory(instance._alias) # Implements __getattr__(). 
return self class Alias(WrappedNode): c = _DynamicEntity() def __init__(self, node, alias): super(Alias, self).__init__(node) self._alias = alias def __hash__(self): return hash(self._alias) def alias(self, alias=None): if alias is None: return self.node else: return Alias(self.node, alias) def unalias(self): return self.node def is_alias(self): return True def __sql__(self, ctx): if ctx.scope == SCOPE_SOURCE: return (ctx .sql(self.node) .literal(' AS ') .sql(Entity(self._alias))) else: return ctx.sql(Entity(self._alias)) class Negated(WrappedNode): def __invert__(self): return self.node def __sql__(self, ctx): return ctx.literal('NOT ').sql(self.node) class BitwiseMixin(object): def __and__(self, other): return self.bin_and(other) def __or__(self, other): return self.bin_or(other) def __sub__(self, other): return self.bin_and(other.bin_negated()) def __invert__(self): return BitwiseNegated(self) class BitwiseNegated(BitwiseMixin, WrappedNode): def __invert__(self): return self.node def __sql__(self, ctx): if ctx.state.operations: op_sql = ctx.state.operations.get(self.op, self.op) else: op_sql = self.op return ctx.literal(op_sql).sql(self.node) class Value(ColumnBase): def __init__(self, value, converter=None, unpack=True): self.value = value self.converter = converter self.multi = unpack and isinstance(self.value, multi_types) if self.multi: self.values = [] for item in self.value: if isinstance(item, Node): self.values.append(item) else: self.values.append(Value(item, self.converter)) def __sql__(self, ctx): if self.multi: # For multi-part values (e.g. lists of IDs). return ctx.sql(EnclosedNodeList(self.values)) return ctx.value(self.value, self.converter) def AsIs(value): return Value(value, unpack=False) class Cast(WrappedNode): def __init__(self, node, cast): super(Cast, self).__init__(node) self._cast = cast self._coerce = False def __sql__(self, ctx): return (ctx .literal('CAST(') .sql(self.node) .literal(' AS %s)' % self._cast)) class Ordering(WrappedNode): def __init__(self, node, direction, collation=None, nulls=None): super(Ordering, self).__init__(node) self.direction = direction self.collation = collation self.nulls = nulls if nulls and nulls.lower() not in ('first', 'last'): raise ValueError('Ordering nulls= parameter must be "first" or ' '"last", got: %s' % nulls) def collate(self, collation=None): return Ordering(self.node, self.direction, collation) def _null_ordering_case(self, nulls): if nulls.lower() == 'last': ifnull, notnull = 1, 0 elif nulls.lower() == 'first': ifnull, notnull = 0, 1 else: raise ValueError('unsupported value for nulls= ordering.') return Case(None, ((self.node.is_null(), ifnull),), notnull) def __sql__(self, ctx): if self.nulls and not ctx.state.nulls_ordering: ctx.sql(self._null_ordering_case(self.nulls)).literal(', ') ctx.sql(self.node).literal(' %s' % self.direction) if self.collation: ctx.literal(' COLLATE %s' % self.collation) if self.nulls and ctx.state.nulls_ordering: ctx.literal(' NULLS %s' % self.nulls) return ctx def Asc(node, collation=None, nulls=None): return Ordering(node, 'ASC', collation, nulls) def Desc(node, collation=None, nulls=None): return Ordering(node, 'DESC', collation, nulls) class Expression(ColumnBase): def __init__(self, lhs, op, rhs, flat=False): self.lhs = lhs self.op = op self.rhs = rhs self.flat = flat def __sql__(self, ctx): overrides = {'parentheses': not self.flat, 'in_expr': True} # First attempt to unwrap the node on the left-hand-side, so that we # can get at the underlying Field if one is present. 
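# For example, in an expression such as (hypothetical model)
#
#     User.joined >= datetime.date(2024, 1, 1)
#
# the left-hand side is a Field -- possibly wrapped, e.g. in an Alias -- and
# its db_value() converter should be applied to the Python date on the right
# before the value is parameterized.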
node = raw_node = self.lhs if isinstance(raw_node, WrappedNode): node = raw_node.unwrap() # Set up the appropriate converter if we have a field on the left side. if isinstance(node, Field) and raw_node._coerce: overrides['converter'] = node.db_value overrides['is_fk_expr'] = isinstance(node, ForeignKeyField) else: overrides['converter'] = None if ctx.state.operations: op_sql = ctx.state.operations.get(self.op, self.op) else: op_sql = self.op with ctx(**overrides): # Postgresql reports an error for IN/NOT IN (), so convert to # the equivalent boolean expression. op_in = self.op == OP.IN or self.op == OP.NOT_IN if op_in and ctx.as_new().parse(self.rhs)[0] == '()': return ctx.literal('0 = 1' if self.op == OP.IN else '1 = 1') return (ctx .sql(self.lhs) .literal(' %s ' % op_sql) .sql(self.rhs)) class StringExpression(Expression): def __add__(self, rhs): return self.concat(rhs) def __radd__(self, lhs): return StringExpression(lhs, OP.CONCAT, self) class Entity(ColumnBase): def __init__(self, *path): self._path = [part.replace('"', '""') for part in path if part] def __getattr__(self, attr): return Entity(*self._path + [attr]) def get_sort_key(self, ctx): return tuple(self._path) def __hash__(self): return hash((self.__class__.__name__, tuple(self._path))) def __sql__(self, ctx): return ctx.literal(quote(self._path, ctx.state.quote or '""')) class SQL(ColumnBase): def __init__(self, sql, params=None): self.sql = sql self.params = params def __sql__(self, ctx): ctx.literal(self.sql) if self.params: for param in self.params: ctx.value(param, False, add_param=False) return ctx def Check(constraint, name=None): check = SQL('CHECK (%s)' % constraint) if not name: return check return NodeList((SQL('CONSTRAINT'), Entity(name), check)) class Function(ColumnBase): def __init__(self, name, arguments, coerce=True, python_value=None): self.name = name self.arguments = arguments self._filter = None self._order_by = None self._python_value = python_value if name and name.lower() in ('sum', 'count', 'cast', 'array_agg'): self._coerce = False else: self._coerce = coerce def __getattr__(self, attr): def decorator(*args, **kwargs): return Function(attr, args, **kwargs) return decorator @Node.copy def filter(self, where=None): self._filter = where @Node.copy def order_by(self, *ordering): self._order_by = ordering @Node.copy def python_value(self, func=None): self._python_value = func def over(self, partition_by=None, order_by=None, start=None, end=None, frame_type=None, window=None, exclude=None): if isinstance(partition_by, Window) and window is None: window = partition_by if window is not None: node = WindowAlias(window) else: node = Window(partition_by=partition_by, order_by=order_by, start=start, end=end, frame_type=frame_type, exclude=exclude, _inline=True) return NodeList((self, SQL('OVER'), node)) def __sql__(self, ctx): ctx.literal(self.name) if not len(self.arguments): ctx.literal('()') else: args = self.arguments # If this is an ordered aggregate, then we will modify the last # argument to append the ORDER BY ... clause. We do this to avoid # double-wrapping any expression args in parentheses, as NodeList # has a special check (hack) in place to work around this. 
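# e.g. an ordered aggregate such as (hypothetical model)
#
#     fn.GROUP_CONCAT(Tweet.content).order_by(Tweet.timestamp)
#
# should render roughly as GROUP_CONCAT("content" ORDER BY "timestamp"): the
# ORDER BY is folded into the final argument instead of being wrapped in an
# extra set of parentheses, and any FILTER (WHERE ...) clause is appended
# after the closing paren below.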
if self._order_by: args = list(args) args[-1] = NodeList((args[-1], SQL('ORDER BY'), CommaNodeList(self._order_by))) with ctx(in_function=True, function_arg_count=len(self.arguments)): ctx.sql(EnclosedNodeList([ (arg if isinstance(arg, Node) else Value(arg, False)) for arg in args])) if self._filter: ctx.literal(' FILTER (WHERE ').sql(self._filter).literal(')') return ctx fn = Function(None, None) class Window(Node): # Frame start/end and frame exclusion. CURRENT_ROW = SQL('CURRENT ROW') GROUP = SQL('GROUP') TIES = SQL('TIES') NO_OTHERS = SQL('NO OTHERS') # Frame types. GROUPS = 'GROUPS' RANGE = 'RANGE' ROWS = 'ROWS' def __init__(self, partition_by=None, order_by=None, start=None, end=None, frame_type=None, extends=None, exclude=None, alias=None, _inline=False): super(Window, self).__init__() if start is not None and not isinstance(start, SQL): start = SQL(start) if end is not None and not isinstance(end, SQL): end = SQL(end) self.partition_by = ensure_tuple(partition_by) self.order_by = ensure_tuple(order_by) self.start = start self.end = end if self.start is None and self.end is not None: raise ValueError('Cannot specify WINDOW end without start.') self._alias = alias or 'w' self._inline = _inline self.frame_type = frame_type self._extends = extends self._exclude = exclude def alias(self, alias=None): self._alias = alias or 'w' return self @Node.copy def as_range(self): self.frame_type = Window.RANGE @Node.copy def as_rows(self): self.frame_type = Window.ROWS @Node.copy def as_groups(self): self.frame_type = Window.GROUPS @Node.copy def extends(self, window=None): self._extends = window @Node.copy def exclude(self, frame_exclusion=None): if isinstance(frame_exclusion, basestring): frame_exclusion = SQL(frame_exclusion) self._exclude = frame_exclusion @staticmethod def following(value=None): if value is None: return SQL('UNBOUNDED FOLLOWING') return SQL('%d FOLLOWING' % value) @staticmethod def preceding(value=None): if value is None: return SQL('UNBOUNDED PRECEDING') return SQL('%d PRECEDING' % value) def __sql__(self, ctx): if ctx.scope != SCOPE_SOURCE and not self._inline: ctx.literal(self._alias) ctx.literal(' AS ') with ctx(parentheses=True): parts = [] if self._extends is not None: ext = self._extends if isinstance(ext, Window): ext = SQL(ext._alias) elif isinstance(ext, basestring): ext = SQL(ext) parts.append(ext) if self.partition_by: parts.extend(( SQL('PARTITION BY'), CommaNodeList(self.partition_by))) if self.order_by: parts.extend(( SQL('ORDER BY'), CommaNodeList(self.order_by))) if self.start is not None and self.end is not None: frame = self.frame_type or 'ROWS' parts.extend(( SQL('%s BETWEEN' % frame), self.start, SQL('AND'), self.end)) elif self.start is not None: parts.extend((SQL(self.frame_type or 'ROWS'), self.start)) elif self.frame_type is not None: parts.append(SQL('%s UNBOUNDED PRECEDING' % self.frame_type)) if self._exclude is not None: parts.extend((SQL('EXCLUDE'), self._exclude)) ctx.sql(NodeList(parts)) return ctx class WindowAlias(Node): def __init__(self, window): self.window = window def alias(self, window_alias): self.window._alias = window_alias return self def __sql__(self, ctx): return ctx.literal(self.window._alias or 'w') class ForUpdate(Node): def __init__(self, expr, of=None, nowait=None): expr = 'FOR UPDATE' if expr is True else expr if expr.lower().endswith('nowait'): expr = expr[:-7] # Strip off the "nowait" bit. 
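# (The trailing NOWAIT is tracked separately so __sql__() can emit it after
#  any OF <tables> clause -- e.g. .for_update('FOR UPDATE NOWAIT') and
#  .for_update(nowait=True) end up equivalent.)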
nowait = True self._expr = expr if of is not None and not isinstance(of, (list, set, tuple)): of = (of,) self._of = of self._nowait = nowait def __sql__(self, ctx): ctx.literal(self._expr) if self._of is not None: ctx.literal(' OF ').sql(CommaNodeList(self._of)) if self._nowait: ctx.literal(' NOWAIT') return ctx def Case(predicate, expression_tuples, default=None): clauses = [SQL('CASE')] if predicate is not None: clauses.append(predicate) for expr, value in expression_tuples: clauses.extend((SQL('WHEN'), expr, SQL('THEN'), value)) if default is not None: clauses.extend((SQL('ELSE'), default)) clauses.append(SQL('END')) return NodeList(clauses) class NodeList(ColumnBase): def __init__(self, nodes, glue=' ', parens=False): self.nodes = nodes self.glue = glue self.parens = parens if parens and len(self.nodes) == 1 and \ isinstance(self.nodes[0], Expression) and \ not self.nodes[0].flat: # Hack to avoid double-parentheses. self.nodes = (self.nodes[0].clone(),) self.nodes[0].flat = True def __sql__(self, ctx): n_nodes = len(self.nodes) if n_nodes == 0: return ctx.literal('()') if self.parens else ctx with ctx(parentheses=self.parens): for i in range(n_nodes - 1): ctx.sql(self.nodes[i]) ctx.literal(self.glue) ctx.sql(self.nodes[n_nodes - 1]) return ctx def CommaNodeList(nodes): return NodeList(nodes, ', ') def EnclosedNodeList(nodes): return NodeList(nodes, ', ', True) class _Namespace(Node): __slots__ = ('_name',) def __init__(self, name): self._name = name def __getattr__(self, attr): return NamespaceAttribute(self, attr) __getitem__ = __getattr__ class NamespaceAttribute(ColumnBase): def __init__(self, namespace, attribute): self._namespace = namespace self._attribute = attribute def __sql__(self, ctx): return (ctx .literal(self._namespace._name + '.') .sql(Entity(self._attribute))) EXCLUDED = _Namespace('EXCLUDED') class DQ(ColumnBase): def __init__(self, **query): super(DQ, self).__init__() self.query = query self._negated = False @Node.copy def __invert__(self): self._negated = not self._negated def clone(self): node = DQ(**self.query) node._negated = self._negated return node #: Represent a row tuple. Tuple = lambda *a: EnclosedNodeList(a) class QualifiedNames(WrappedNode): def __sql__(self, ctx): with ctx.scope_column(): return ctx.sql(self.node) def qualify_names(node): # Search a node heirarchy to ensure that any column-like objects are # referenced using fully-qualified names. 
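# For instance, in an upsert's DO UPDATE SET clause a bare column name would
# be ambiguous between the target table and the EXCLUDED pseudo-table, so an
# update value like (hypothetical "kv" table)
#
#     {KV.value: KV.value + EXCLUDED.value}
#
# is wrapped so the column renders fully qualified ("kv"."value") rather than
# as a bare "value".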
if isinstance(node, Expression): return node.__class__(qualify_names(node.lhs), node.op, qualify_names(node.rhs), node.flat) elif isinstance(node, ColumnBase): return QualifiedNames(node) return node class OnConflict(Node): def __init__(self, action=None, update=None, preserve=None, where=None, conflict_target=None, conflict_where=None, conflict_constraint=None): self._action = action self._update = update self._preserve = ensure_tuple(preserve) self._where = where if conflict_target is not None and conflict_constraint is not None: raise ValueError('only one of "conflict_target" and ' '"conflict_constraint" may be specified.') self._conflict_target = ensure_tuple(conflict_target) self._conflict_where = conflict_where self._conflict_constraint = conflict_constraint def get_conflict_statement(self, ctx, query): return ctx.state.conflict_statement(self, query) def get_conflict_update(self, ctx, query): return ctx.state.conflict_update(self, query) @Node.copy def preserve(self, *columns): self._preserve = columns @Node.copy def update(self, _data=None, **kwargs): if _data and kwargs and not isinstance(_data, dict): raise ValueError('Cannot mix data with keyword arguments in the ' 'OnConflict update method.') _data = _data or {} if kwargs: _data.update(kwargs) self._update = _data @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def conflict_target(self, *constraints): self._conflict_constraint = None self._conflict_target = constraints @Node.copy def conflict_where(self, *expressions): if self._conflict_where is not None: expressions = (self._conflict_where,) + expressions self._conflict_where = reduce(operator.and_, expressions) @Node.copy def conflict_constraint(self, constraint): self._conflict_constraint = constraint self._conflict_target = None def database_required(method): @wraps(method) def inner(self, database=None, *args, **kwargs): database = self._database if database is None else database if not database: raise InterfaceError('Query must be bound to a database in order ' 'to call "%s".' % method.__name__) return method(self, database, *args, **kwargs) return inner # BASE QUERY INTERFACE. 
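# A rough sketch of how the row-type helpers below are typically used (the
# `User` model and `UserRow` class are hypothetical):
#
#     query = User.select()
#     for row in query.dicts():           # each row as a dict
#         ...
#     for row in query.tuples():          # each row as a plain tuple
#         ...
#     for row in query.namedtuples():     # each row as a namedtuple
#         ...
#     for row in query.objects(UserRow):  # each row passed to a constructor
#         ...
#
# The row type only changes which cursor wrapper _get_cursor_wrapper() picks;
# the generated SQL is identical in every case.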
class BaseQuery(Node): default_row_type = ROW.DICT def __init__(self, _database=None, **kwargs): self._database = _database self._cursor_wrapper = None self._row_type = None self._constructor = None super(BaseQuery, self).__init__(**kwargs) def bind(self, database=None): self._database = database return self def clone(self): query = super(BaseQuery, self).clone() query._cursor_wrapper = None return query @Node.copy def dicts(self, as_dict=True): self._row_type = ROW.DICT if as_dict else None return self @Node.copy def tuples(self, as_tuple=True): self._row_type = ROW.TUPLE if as_tuple else None return self @Node.copy def namedtuples(self, as_namedtuple=True): self._row_type = ROW.NAMED_TUPLE if as_namedtuple else None return self @Node.copy def objects(self, constructor=None): self._row_type = ROW.CONSTRUCTOR if constructor else None self._constructor = constructor return self def _get_cursor_wrapper(self, cursor): row_type = self._row_type or self.default_row_type if row_type == ROW.DICT: return DictCursorWrapper(cursor) elif row_type == ROW.TUPLE: return CursorWrapper(cursor) elif row_type == ROW.NAMED_TUPLE: return NamedTupleCursorWrapper(cursor) elif row_type == ROW.CONSTRUCTOR: return ObjectCursorWrapper(cursor, self._constructor) else: raise ValueError('Unrecognized row type: "%s".' % row_type) def __sql__(self, ctx): raise NotImplementedError def sql(self): if self._database: context = self._database.get_sql_context() else: context = Context() return context.parse(self) @database_required def execute(self, database): return self._execute(database) def _execute(self, database): raise NotImplementedError def iterator(self, database=None): return iter(self.execute(database).iterator()) def _ensure_execution(self): if not self._cursor_wrapper: if not self._database: raise ValueError('Query has not been executed.') self.execute() def __iter__(self): self._ensure_execution() return iter(self._cursor_wrapper) def __getitem__(self, value): self._ensure_execution() if isinstance(value, slice): index = value.stop else: index = value if index is not None: index = index + 1 if index >= 0 else 0 self._cursor_wrapper.fill_cache(index) return self._cursor_wrapper.row_cache[value] def __len__(self): self._ensure_execution() return len(self._cursor_wrapper) def __str__(self): return query_to_string(self) class RawQuery(BaseQuery): def __init__(self, sql=None, params=None, **kwargs): super(RawQuery, self).__init__(**kwargs) self._sql = sql self._params = params def __sql__(self, ctx): ctx.literal(self._sql) if self._params: for param in self._params: ctx.value(param, add_param=False) return ctx def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper class Query(BaseQuery): def __init__(self, where=None, order_by=None, limit=None, offset=None, **kwargs): super(Query, self).__init__(**kwargs) self._where = where self._order_by = order_by self._limit = limit self._offset = offset self._cte_list = None @Node.copy def with_cte(self, *cte_list): self._cte_list = cte_list @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def orwhere(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.or_, expressions) @Node.copy def order_by(self, *values): self._order_by = values @Node.copy def 
order_by_extend(self, *values): self._order_by = ((self._order_by or ()) + values) or None @Node.copy def limit(self, value=None): self._limit = value @Node.copy def offset(self, value=None): self._offset = value @Node.copy def paginate(self, page, paginate_by=20): if page > 0: page -= 1 self._limit = paginate_by self._offset = page * paginate_by def _apply_ordering(self, ctx): if self._order_by: (ctx .literal(' ORDER BY ') .sql(CommaNodeList(self._order_by))) if self._limit is not None or (self._offset is not None and ctx.state.limit_max): limit = ctx.state.limit_max if self._limit is None else self._limit ctx.literal(' LIMIT ').sql(limit) if self._offset is not None: ctx.literal(' OFFSET ').sql(self._offset) return ctx def __sql__(self, ctx): if self._cte_list: # The CTE scope is only used at the very beginning of the query, # when we are describing the various CTEs we will be using. recursive = any(cte._recursive for cte in self._cte_list) # Explicitly disable the "subquery" flag here, so as to avoid # unnecessary parentheses around subsequent selects. with ctx.scope_cte(subquery=False): (ctx .literal('WITH RECURSIVE ' if recursive else 'WITH ') .sql(CommaNodeList(self._cte_list)) .literal(' ')) return ctx def __compound_select__(operation, inverted=False): def method(self, other): if inverted: self, other = other, self return CompoundSelectQuery(self, operation, other) return method class SelectQuery(Query): union_all = __add__ = __compound_select__('UNION ALL') union = __or__ = __compound_select__('UNION') intersect = __and__ = __compound_select__('INTERSECT') except_ = __sub__ = __compound_select__('EXCEPT') __radd__ = __compound_select__('UNION ALL', inverted=True) __ror__ = __compound_select__('UNION', inverted=True) __rand__ = __compound_select__('INTERSECT', inverted=True) __rsub__ = __compound_select__('EXCEPT', inverted=True) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns.') query = (Select((self,), columns) .bind(self._database)) if getattr(self, 'model', None) is not None: # Bind to the sub-select's model type, if defined. 
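# For example (hypothetical model):
#
#     inner = User.select(User.username.alias('name'))
#     outer = inner.select_from(SQL('name')).order_by(SQL('name'))
#
# builds SELECT "name" FROM (SELECT ...) AS <alias>; the objects(self.model)
# call below keeps the wrapped rows associated with the originating model.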
query = query.objects(self.model) return query class SelectBase(_HashableSource, Source, SelectQuery): def _get_hash(self): return hash((self.__class__, self._alias or id(self))) def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper @database_required def peek(self, database, n=1): rows = self.execute(database)[:n] if rows: return rows[0] if n == 1 else rows @database_required def first(self, database, n=1): if self._limit != n: self._limit = n self._cursor_wrapper = None return self.peek(database, n=n) @database_required def scalar(self, database, as_tuple=False): row = self.tuples().peek(database) return row[0] if row and not as_tuple else row @database_required def count(self, database, clear_limit=False): clone = self.order_by().alias('_wrapped') if clear_limit: clone._limit = clone._offset = None try: if clone._having is None and clone._group_by is None and \ clone._windows is None and clone._distinct is None and \ clone._simple_distinct is not True: clone = clone.select(SQL('1')) except AttributeError: pass return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database) @database_required def exists(self, database): clone = self.columns(SQL('1')) clone._limit = 1 clone._offset = None return bool(clone.scalar()) @database_required def get(self, database): self._cursor_wrapper = None try: return self.execute(database)[0] except IndexError: pass # QUERY IMPLEMENTATIONS. class CompoundSelectQuery(SelectBase): def __init__(self, lhs, op, rhs): super(CompoundSelectQuery, self).__init__() self.lhs = lhs self.op = op self.rhs = rhs @property def _returning(self): return self.lhs._returning @database_required def exists(self, database): query = Select((self.limit(1),), (SQL('1'),)).bind(database) return bool(query.scalar()) def _get_query_key(self): return (self.lhs.get_query_key(), self.rhs.get_query_key()) def _wrap_parens(self, ctx, subq): csq_setting = ctx.state.compound_select_parentheses if not csq_setting or csq_setting == CSQ_PARENTHESES_NEVER: return False elif csq_setting == CSQ_PARENTHESES_ALWAYS: return True elif csq_setting == CSQ_PARENTHESES_UNNESTED: if ctx.state.in_expr or ctx.state.in_function: # If this compound select query is being used inside an # expression, e.g., an IN or EXISTS(). return False # If the query on the left or right is itself a compound select # query, then we do not apply parentheses. However, if it is a # regular SELECT query, we will apply parentheses. return not isinstance(subq, CompoundSelectQuery) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) # Call parent method to handle any CTEs. super(CompoundSelectQuery, self).__sql__(ctx) outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE) with ctx(parentheses=outer_parens): # Should the left-hand query be wrapped in parentheses? lhs_parens = self._wrap_parens(ctx, self.lhs) with ctx.scope_normal(parentheses=lhs_parens, subquery=False): ctx.sql(self.lhs) ctx.literal(' %s ' % self.op) with ctx.push_alias(): # Should the right-hand query be wrapped in parentheses? rhs_parens = self._wrap_parens(ctx, self.rhs) with ctx.scope_normal(parentheses=rhs_parens, subquery=False): ctx.sql(self.rhs) # Apply ORDER BY, LIMIT, OFFSET. We use the "values" scope so that # entity names are not fully-qualified. This is a bit of a hack, as # we're relying on the logic in Column.__sql__() to not fully # qualify column names. 
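# e.g. for a compound query like (hypothetical queries)
#
#     (staff_q | customer_q).order_by(SQL('username')).limit(10)
#
# the ORDER BY must reference the column by its bare name, since a
# table-qualified reference would be invalid outside the component selects.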
with ctx.scope_values(): self._apply_ordering(ctx) return self.apply_alias(ctx) class Select(SelectBase): def __init__(self, from_list=None, columns=None, group_by=None, having=None, distinct=None, windows=None, for_update=None, for_update_of=None, nowait=None, lateral=None, **kwargs): super(Select, self).__init__(**kwargs) self._from_list = (list(from_list) if isinstance(from_list, tuple) else from_list) or [] self._returning = columns self._group_by = group_by self._having = having self._windows = None self._for_update = for_update # XXX: consider reorganizing. self._for_update_of = for_update_of self._for_update_nowait = nowait self._lateral = lateral self._distinct = self._simple_distinct = None if distinct: if isinstance(distinct, bool): self._simple_distinct = distinct else: self._distinct = distinct self._cursor_wrapper = None def clone(self): clone = super(Select, self).clone() if clone._from_list: clone._from_list = list(clone._from_list) return clone @Node.copy def columns(self, *columns, **kwargs): self._returning = columns select = columns @Node.copy def select_extend(self, *columns): self._returning = tuple(self._returning) + columns @Node.copy def from_(self, *sources): self._from_list = list(sources) @Node.copy def join(self, dest, join_type=JOIN.INNER, on=None): if not self._from_list: raise ValueError('No sources to join on.') item = self._from_list.pop() self._from_list.append(Join(item, dest, join_type, on)) @Node.copy def group_by(self, *columns): grouping = [] for column in columns: if isinstance(column, Table): if not column._columns: raise ValueError('Cannot pass a table to group_by() that ' 'does not have columns explicitly ' 'declared.') grouping.extend([getattr(column, col_name) for col_name in column._columns]) else: grouping.append(column) self._group_by = grouping def group_by_extend(self, *values): """@Node.copy used from group_by() call""" group_by = tuple(self._group_by or ()) + values return self.group_by(*group_by) @Node.copy def having(self, *expressions): if self._having is not None: expressions = (self._having,) + expressions self._having = reduce(operator.and_, expressions) @Node.copy def distinct(self, *columns): if len(columns) == 1 and (columns[0] is True or columns[0] is False): self._simple_distinct = columns[0] else: self._simple_distinct = False self._distinct = columns @Node.copy def window(self, *windows): self._windows = windows if windows else None @Node.copy def for_update(self, for_update=True, of=None, nowait=None): if not for_update and (of is not None or nowait): for_update = True self._for_update = for_update self._for_update_of = of self._for_update_nowait = nowait @Node.copy def lateral(self, lateral=True): self._lateral = lateral def _get_query_key(self): return self._alias def __sql_selection__(self, ctx, is_subquery=False): return ctx.sql(CommaNodeList(self._returning)) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) if self._lateral and ctx.scope == SCOPE_SOURCE: ctx.literal('LATERAL ') is_subquery = ctx.subquery state = { 'converter': None, 'in_function': False, 'parentheses': is_subquery or (ctx.scope == SCOPE_SOURCE), 'subquery': True, } if ctx.state.in_function and ctx.state.function_arg_count == 1: state['parentheses'] = False with ctx.scope_normal(**state): # Defer calling parent SQL until here. This ensures that any CTEs # for this query will be properly nested if this query is a # sub-select or is used in an expression. See GH#1809 for example. 
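# A minimal CTE sketch built from the classes above (the `users` Table is
# hypothetical):
#
#     users = Table('users', ('id', 'username'))
#     cte = CTE('recent', users.select())
#     query = cte.select_from(cte.c.id, cte.c.username)
#
# select_from() attaches the CTE via with_cte(), and the WITH clause is
# emitted here -- after scoping/parenthesization are set up -- so it nests
# correctly when this SELECT is itself used as a sub-select.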
super(Select, self).__sql__(ctx) ctx.literal('SELECT ') if self._simple_distinct or self._distinct is not None: ctx.literal('DISTINCT ') if self._distinct: (ctx .literal('ON ') .sql(EnclosedNodeList(self._distinct)) .literal(' ')) with ctx.scope_source(): ctx = self.__sql_selection__(ctx, is_subquery) if self._from_list: with ctx.scope_source(parentheses=False): ctx.literal(' FROM ').sql(CommaNodeList(self._from_list)) if self._where is not None: ctx.literal(' WHERE ').sql(self._where) if self._group_by: ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by)) if self._having is not None: ctx.literal(' HAVING ').sql(self._having) if self._windows is not None: ctx.literal(' WINDOW ') ctx.sql(CommaNodeList(self._windows)) # Apply ORDER BY, LIMIT, OFFSET. self._apply_ordering(ctx) if self._for_update: if not ctx.state.for_update: raise ValueError('FOR UPDATE specified but not supported ' 'by database.') ctx.literal(' ') ctx.sql(ForUpdate(self._for_update, self._for_update_of, self._for_update_nowait)) # If the subquery is inside a function -or- we are evaluating a # subquery on either side of an expression w/o an explicit alias, do # not generate an alias + AS clause. if ctx.state.in_function or (ctx.state.in_expr and self._alias is None): return ctx return self.apply_alias(ctx) class _WriteQuery(Query): def __init__(self, table, returning=None, **kwargs): self.table = table self._returning = returning self._return_cursor = True if returning else False super(_WriteQuery, self).__init__(**kwargs) @Node.copy def returning(self, *returning): self._returning = returning self._return_cursor = True if returning else False def apply_returning(self, ctx): if self._returning: with ctx.scope_source(): ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning)) return ctx def _execute(self, database): if self._returning: cursor = self.execute_returning(database) else: cursor = database.execute(self) return self.handle_result(database, cursor) def execute_returning(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper def handle_result(self, database, cursor): if self._return_cursor: return cursor return database.rows_affected(cursor) def _set_table_alias(self, ctx): ctx.alias_manager[self.table] = self.table.__name__ def __sql__(self, ctx): super(_WriteQuery, self).__sql__(ctx) # We explicitly set the table alias to the table's name, which ensures # that if a sub-select references a column on the outer table, we won't # assign it a new alias (e.g. t2) but will refer to it as table.column. self._set_table_alias(ctx) return ctx class Update(_WriteQuery): def __init__(self, table, update=None, **kwargs): super(Update, self).__init__(table, **kwargs) self._update = update self._from = None @Node.copy def from_(self, *sources): self._from = sources def __sql__(self, ctx): super(Update, self).__sql__(ctx) with ctx.scope_values(subquery=True): ctx.literal('UPDATE ') expressions = [] for k, v in sorted(self._update.items(), key=ctx.column_sort_key): if not isinstance(v, Node): if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) elif isinstance(v, Model) and isinstance(k, ForeignKeyField): # NB: we want to ensure that when passed a model instance # in the context of a foreign-key, we apply the fk-specific # adaptation of the model. 
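# e.g. (hypothetical models) Tweet.update(user=some_user).where(...): the
# Model instance assigned to the ForeignKeyField is converted with the
# field's to_value() -- typically down to its primary key -- rather than
# being passed through as an opaque parameter.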
v = k.to_value(v) if not isinstance(v, Value): v = qualify_names(v) expressions.append(NodeList((k, SQL('='), v))) (ctx .sql(self.table) .literal(' SET ') .sql(CommaNodeList(expressions))) if self._from: with ctx.scope_source(parentheses=False): ctx.literal(' FROM ').sql(CommaNodeList(self._from)) if self._where: with ctx.scope_normal(): ctx.literal(' WHERE ').sql(self._where) self._apply_ordering(ctx) return self.apply_returning(ctx) class Insert(_WriteQuery): SIMPLE = 0 QUERY = 1 MULTI = 2 class DefaultValuesException(Exception): pass def __init__(self, table, insert=None, columns=None, on_conflict=None, **kwargs): super(Insert, self).__init__(table, **kwargs) self._insert = insert self._columns = columns self._on_conflict = on_conflict self._query_type = None def where(self, *expressions): raise NotImplementedError('INSERT queries cannot have a WHERE clause.') @Node.copy def on_conflict_ignore(self, ignore=True): self._on_conflict = OnConflict('IGNORE') if ignore else None @Node.copy def on_conflict_replace(self, replace=True): self._on_conflict = OnConflict('REPLACE') if replace else None @Node.copy def on_conflict(self, *args, **kwargs): self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs) else None) def _simple_insert(self, ctx): if not self._insert: raise self.DefaultValuesException('Error: no data to insert.') return self._generate_insert((self._insert,), ctx) def get_default_data(self): return {} def get_default_columns(self): if self.table._columns: return [getattr(self.table, col) for col in self.table._columns if col != self.table._primary_key] def _generate_insert(self, insert, ctx): rows_iter = iter(insert) columns = self._columns # Load and organize column defaults (if provided). defaults = self.get_default_data() # First figure out what columns are being inserted (if they weren't # specified explicitly). Resulting columns are normalized and ordered. if not columns: try: row = next(rows_iter) except StopIteration: raise self.DefaultValuesException('Error: no rows to insert.') if not isinstance(row, Mapping): columns = self.get_default_columns() if columns is None: raise ValueError('Bulk insert must specify columns.') else: # Infer column names from the dict of data being inserted. accum = [] for column in row: if isinstance(column, basestring): column = getattr(self.table, column) accum.append(column) # Add any columns present in the default data that are not # accounted for by the dictionary of row data. 
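# e.g. a bulk insert over the plain Table API defined earlier (`db` is a
# hypothetical bound database):
#
#     users = Table('users', ('id', 'username', 'is_admin'))
#     users.insert([{'username': 'huey', 'is_admin': False},
#                   {'username': 'zaizee', 'is_admin': True}]).execute(db)
#
# The column list is inferred from the first row's keys plus any columns
# supplied by get_default_data() (which model-level inserts override), so
# every row should supply the same keys; a row that omits a column with no
# default and no NULL fallback raises the "Missing value" error in the loop
# below.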
column_set = set(accum) for col in (set(defaults) - column_set): accum.append(col) columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx)) rows_iter = itertools.chain(iter((row,)), rows_iter) else: clean_columns = [] seen = set() for column in columns: if isinstance(column, basestring): column_obj = getattr(self.table, column) else: column_obj = column clean_columns.append(column_obj) seen.add(column_obj) columns = clean_columns for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)): if col not in seen: columns.append(col) fk_fields = set() nullable_columns = set() value_lookups = {} for column in columns: lookups = [column, column.name] if isinstance(column, Field): if column.name != column.column_name: lookups.append(column.column_name) if column.null: nullable_columns.add(column) if isinstance(column, ForeignKeyField): fk_fields.add(column) value_lookups[column] = lookups ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ') columns_converters = [ (column, column.db_value if isinstance(column, Field) else None) for column in columns] all_values = [] for row in rows_iter: values = [] is_dict = isinstance(row, Mapping) for i, (column, converter) in enumerate(columns_converters): try: if is_dict: # The logic is a bit convoluted, but in order to be # flexible in what we accept (dict keyed by # column/field, field name, or underlying column name), # we try accessing the row data dict using each # possible key. If no match is found, throw an error. for lookup in value_lookups[column]: try: val = row[lookup] except KeyError: pass else: break else: raise KeyError else: val = row[i] except (KeyError, IndexError): if column in defaults: val = defaults[column] if callable_(val): val = val() elif column in nullable_columns: val = None else: raise ValueError('Missing value for %s.' 
% column.name) if not isinstance(val, Node) or (isinstance(val, Model) and column in fk_fields): val = Value(val, converter=converter, unpack=False) values.append(val) all_values.append(EnclosedNodeList(values)) if not all_values: raise self.DefaultValuesException('Error: no data to insert.') with ctx.scope_values(subquery=True): return ctx.sql(CommaNodeList(all_values)) def _query_insert(self, ctx): return (ctx .sql(EnclosedNodeList(self._columns)) .literal(' ') .sql(self._insert)) def _default_values(self, ctx): if not self._database: return ctx.literal('DEFAULT VALUES') return self._database.default_values_insert(ctx) def __sql__(self, ctx): super(Insert, self).__sql__(ctx) with ctx.scope_values(): stmt = None if self._on_conflict is not None: stmt = self._on_conflict.get_conflict_statement(ctx, self) (ctx .sql(stmt or SQL('INSERT')) .literal(' INTO ') .sql(self.table) .literal(' ')) if isinstance(self._insert, Mapping) and not self._columns: try: self._simple_insert(ctx) except self.DefaultValuesException: self._default_values(ctx) self._query_type = Insert.SIMPLE elif isinstance(self._insert, (SelectQuery, SQL)): self._query_insert(ctx) self._query_type = Insert.QUERY else: self._generate_insert(self._insert, ctx) self._query_type = Insert.MULTI if self._on_conflict is not None: update = self._on_conflict.get_conflict_update(ctx, self) if update is not None: ctx.literal(' ').sql(update) return self.apply_returning(ctx) def _execute(self, database): if self._returning is None and database.returning_clause \ and self.table._primary_key: self._returning = (self.table._primary_key,) try: return super(Insert, self)._execute(database) except self.DefaultValuesException: pass def handle_result(self, database, cursor): if self._return_cursor: return cursor if self._query_type != Insert.SIMPLE and not self._returning: return database.rows_affected(cursor) return database.last_insert_id(cursor, self._query_type) class Delete(_WriteQuery): def __sql__(self, ctx): super(Delete, self).__sql__(ctx) with ctx.scope_values(subquery=True): ctx.literal('DELETE FROM ').sql(self.table) if self._where is not None: with ctx.scope_normal(): ctx.literal(' WHERE ').sql(self._where) self._apply_ordering(ctx) return self.apply_returning(ctx) class Index(Node): def __init__(self, name, table, expressions, unique=False, safe=False, where=None, using=None): self._name = name self._table = Entity(table) if not isinstance(table, Table) else table self._expressions = expressions self._where = where self._unique = unique self._safe = safe self._using = using @Node.copy def safe(self, _safe=True): self._safe = _safe @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def using(self, _using=None): self._using = _using def __sql__(self, ctx): statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX ' with ctx.scope_values(subquery=True): ctx.literal(statement) if self._safe: ctx.literal('IF NOT EXISTS ') # Sqlite uses CREATE INDEX <schema>.<name> ON <table>, whereas most # others use: CREATE INDEX <name> ON <schema>.<table>. 
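# Indexes are usually built indirectly, e.g. (hypothetical names):
#
#     idx = Index('users_username', Table('users'), ('username',), unique=True)
#     idx = ModelIndex(User, (User.username,), unique=True)
#
# Either renders a CREATE [UNIQUE] INDEX statement; the schema-prefix branch
# below only applies to SQLite, which addresses indexes as <schema>.<name>.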
if ctx.state.index_schema_prefix and \ isinstance(self._table, Table) and self._table._schema: index_name = Entity(self._table._schema, self._name) table_name = Entity(self._table.__name__) else: index_name = Entity(self._name) table_name = self._table ctx.sql(index_name) if self._using is not None and \ ctx.state.index_using_precedes_table: ctx.literal(' USING %s' % self._using) # MySQL style. (ctx .literal(' ON ') .sql(table_name) .literal(' ')) if self._using is not None and not \ ctx.state.index_using_precedes_table: ctx.literal('USING %s ' % self._using) # Postgres/default. ctx.sql(EnclosedNodeList([ SQL(expr) if isinstance(expr, basestring) else expr for expr in self._expressions])) if self._where is not None: ctx.literal(' WHERE ').sql(self._where) return ctx class ModelIndex(Index): def __init__(self, model, fields, unique=False, safe=True, where=None, using=None, name=None): self._model = model if name is None: name = self._generate_name_from_fields(model, fields) if using is None: for field in fields: if isinstance(field, Field) and hasattr(field, 'index_type'): using = field.index_type super(ModelIndex, self).__init__( name=name, table=model._meta.table, expressions=fields, unique=unique, safe=safe, where=where, using=using) def _generate_name_from_fields(self, model, fields): accum = [] for field in fields: if isinstance(field, basestring): accum.append(field.split()[0]) else: if isinstance(field, Node) and not isinstance(field, Field): field = field.unwrap() if isinstance(field, Field): accum.append(field.column_name) if not accum: raise ValueError('Unable to generate a name for the index, please ' 'explicitly specify a name.') clean_field_names = re.sub(r'[^\w]+', '', '_'.join(accum)) meta = model._meta prefix = meta.name if meta.legacy_table_names else meta.table_name return _truncate_constraint_name('_'.join((prefix, clean_field_names))) def _truncate_constraint_name(constraint, maxlen=64): if len(constraint) > maxlen: name_hash = hashlib.md5(constraint.encode('utf-8')).hexdigest() constraint = '%s_%s' % (constraint[:(maxlen - 8)], name_hash[:7]) return constraint # DB-API 2.0 EXCEPTIONS. class PeeweeException(Exception): def __init__(self, *args): if args and isinstance(args[0], Exception): self.orig, args = args[0], args[1:] super(PeeweeException, self).__init__(*args) class ImproperlyConfigured(PeeweeException): pass class DatabaseError(PeeweeException): pass class DataError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InterfaceError(PeeweeException): pass class InternalError(DatabaseError): pass class NotSupportedError(DatabaseError): pass class OperationalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class ExceptionWrapper(object): __slots__ = ('exceptions',) def __init__(self, exceptions): self.exceptions = exceptions def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: return # psycopg2.8 shits out a million cute error types. Try to catch em all. 
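# The net effect for callers is driver-independent error handling, e.g.
# (`db` is a hypothetical bound database; '?' assumes the default
# sqlite-style paramstyle):
#
#     try:
#         db.execute_sql('INSERT INTO users (username) VALUES (?)', ('huey',))
#     except IntegrityError:
#         pass  # constraint violation, whichever driver-level error was raised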
if pg_errors is not None and exc_type.__name__ not in self.exceptions \ and issubclass(exc_type, pg_errors.Error): exc_type = exc_type.__bases__[0] if exc_type.__name__ in self.exceptions: new_type = self.exceptions[exc_type.__name__] exc_args = exc_value.args reraise(new_type, new_type(exc_value, *exc_args), traceback) EXCEPTIONS = { 'ConstraintError': IntegrityError, 'DatabaseError': DatabaseError, 'DataError': DataError, 'IntegrityError': IntegrityError, 'InterfaceError': InterfaceError, 'InternalError': InternalError, 'NotSupportedError': NotSupportedError, 'OperationalError': OperationalError, 'ProgrammingError': ProgrammingError, 'TransactionRollbackError': OperationalError} __exception_wrapper__ = ExceptionWrapper(EXCEPTIONS) # DATABASE INTERFACE AND CONNECTION MANAGEMENT. IndexMetadata = collections.namedtuple( 'IndexMetadata', ('name', 'sql', 'columns', 'unique', 'table')) ColumnMetadata = collections.namedtuple( 'ColumnMetadata', ('name', 'data_type', 'null', 'primary_key', 'table', 'default')) ForeignKeyMetadata = collections.namedtuple( 'ForeignKeyMetadata', ('column', 'dest_table', 'dest_column', 'table')) ViewMetadata = collections.namedtuple('ViewMetadata', ('name', 'sql')) class _ConnectionState(object): def __init__(self, **kwargs): super(_ConnectionState, self).__init__(**kwargs) self.reset() def reset(self): self.closed = True self.conn = None self.ctx = [] self.transactions = [] def set_connection(self, conn): self.conn = conn self.closed = False self.ctx = [] self.transactions = [] class _ConnectionLocal(_ConnectionState, threading.local): pass class _NoopLock(object): __slots__ = () def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class ConnectionContext(_callable_context_manager): __slots__ = ('db',) def __init__(self, db): self.db = db def __enter__(self): if self.db.is_closed(): self.db.connect() def __exit__(self, exc_type, exc_val, exc_tb): self.db.close() class Database(_callable_context_manager): context_class = Context field_types = {} operations = {} param = '?' quote = '""' server_version = None # Feature toggles. commit_select = False compound_select_parentheses = CSQ_PARENTHESES_NEVER for_update = False index_schema_prefix = False index_using_precedes_table = False limit_max = None nulls_ordering = False returning_clause = False safe_create_index = True safe_drop_index = True sequences = False truncate_table = True def __init__(self, database, thread_safe=True, autorollback=False, field_types=None, operations=None, autocommit=None, autoconnect=True, **kwargs): self._field_types = merge_dict(FIELD, self.field_types) self._operations = merge_dict(OP, self.operations) if field_types: self._field_types.update(field_types) if operations: self._operations.update(operations) self.autoconnect = autoconnect self.autorollback = autorollback self.thread_safe = thread_safe if thread_safe: self._state = _ConnectionLocal() self._lock = threading.RLock() else: self._state = _ConnectionState() self._lock = _NoopLock() if autocommit is not None: __deprecated__('Peewee no longer uses the "autocommit" option, as ' 'the semantics now require it to always be True. 
' 'Because some database-drivers also use the ' '"autocommit" parameter, you are receiving a ' 'warning so you may update your code and remove ' 'the parameter, as in the future, specifying ' 'autocommit could impact the behavior of the ' 'database driver you are using.') self.connect_params = {} self.init(database, **kwargs) def init(self, database, **kwargs): if not self.is_closed(): self.close() self.database = database self.connect_params.update(kwargs) self.deferred = not bool(database) def __enter__(self): if self.is_closed(): self.connect() ctx = self.atomic() self._state.ctx.append(ctx) ctx.__enter__() return self def __exit__(self, exc_type, exc_val, exc_tb): ctx = self._state.ctx.pop() try: ctx.__exit__(exc_type, exc_val, exc_tb) finally: if not self._state.ctx: self.close() def connection_context(self): return ConnectionContext(self) def _connect(self): raise NotImplementedError def connect(self, reuse_if_open=False): with self._lock: if self.deferred: raise InterfaceError('Error, database must be initialized ' 'before opening a connection.') if not self._state.closed: if reuse_if_open: return False raise OperationalError('Connection already opened.') self._state.reset() with __exception_wrapper__: self._state.set_connection(self._connect()) if self.server_version is None: self._set_server_version(self._state.conn) self._initialize_connection(self._state.conn) return True def _initialize_connection(self, conn): pass def _set_server_version(self, conn): self.server_version = 0 def close(self): with self._lock: if self.deferred: raise InterfaceError('Error, database must be initialized ' 'before opening a connection.') if self.in_transaction(): raise OperationalError('Attempting to close database while ' 'transaction is open.') is_open = not self._state.closed try: if is_open: with __exception_wrapper__: self._close(self._state.conn) finally: self._state.reset() return is_open def _close(self, conn): conn.close() def is_closed(self): return self._state.closed def is_connection_usable(self): return not self._state.closed def connection(self): if self.is_closed(): self.connect() return self._state.conn def cursor(self, commit=None): if self.is_closed(): if self.autoconnect: self.connect() else: raise InterfaceError('Error, database connection not opened.') return self._state.conn.cursor() def execute_sql(self, sql, params=None, commit=SENTINEL): logger.debug((sql, params)) if commit is SENTINEL: if self.in_transaction(): commit = False elif self.commit_select: commit = True else: commit = not sql[:6].lower().startswith('select') with __exception_wrapper__: cursor = self.cursor(commit) try: cursor.execute(sql, params or ()) except Exception: if self.autorollback and not self.in_transaction(): self.rollback() raise else: if commit and not self.in_transaction(): self.commit() return cursor def execute(self, query, commit=SENTINEL, **context_options): ctx = self.get_sql_context(**context_options) sql, params = ctx.sql(query).query() return self.execute_sql(sql, params, commit=commit) def get_context_options(self): return { 'field_types': self._field_types, 'operations': self._operations, 'param': self.param, 'quote': self.quote, 'compound_select_parentheses': self.compound_select_parentheses, 'conflict_statement': self.conflict_statement, 'conflict_update': self.conflict_update, 'for_update': self.for_update, 'index_schema_prefix': self.index_schema_prefix, 'index_using_precedes_table': self.index_using_precedes_table, 'limit_max': self.limit_max, 'nulls_ordering': self.nulls_ordering, 
} def get_sql_context(self, **context_options): context = self.get_context_options() if context_options: context.update(context_options) return self.context_class(**context) def conflict_statement(self, on_conflict, query): raise NotImplementedError def conflict_update(self, on_conflict, query): raise NotImplementedError def _build_on_conflict_update(self, on_conflict, query): if on_conflict._conflict_target: stmt = SQL('ON CONFLICT') target = EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in on_conflict._conflict_target]) if on_conflict._conflict_where is not None: target = NodeList([target, SQL('WHERE'), on_conflict._conflict_where]) else: stmt = SQL('ON CONFLICT ON CONSTRAINT') target = on_conflict._conflict_constraint if isinstance(target, basestring): target = Entity(target) updates = [] if on_conflict._preserve: for column in on_conflict._preserve: excluded = NodeList((SQL('EXCLUDED'), ensure_entity(column)), glue='.') expression = NodeList((ensure_entity(column), SQL('='), excluded)) updates.append(expression) if on_conflict._update: for k, v in on_conflict._update.items(): if not isinstance(v, Node): # Attempt to resolve string field-names to their respective # field object, to apply data-type conversions. if isinstance(k, basestring): k = getattr(query.table, k) if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) else: v = QualifiedNames(v) updates.append(NodeList((ensure_entity(k), SQL('='), v))) parts = [stmt, target, SQL('DO UPDATE SET'), CommaNodeList(updates)] if on_conflict._where: parts.extend((SQL('WHERE'), QualifiedNames(on_conflict._where))) return NodeList(parts) def last_insert_id(self, cursor, query_type=None): return cursor.lastrowid def rows_affected(self, cursor): return cursor.rowcount def default_values_insert(self, ctx): return ctx.literal('DEFAULT VALUES') def session_start(self): with self._lock: return self.transaction().__enter__() def session_commit(self): with self._lock: try: txn = self.pop_transaction() except IndexError: return False txn.commit(begin=self.in_transaction()) return True def session_rollback(self): with self._lock: try: txn = self.pop_transaction() except IndexError: return False txn.rollback(begin=self.in_transaction()) return True def in_transaction(self): return bool(self._state.transactions) def push_transaction(self, transaction): self._state.transactions.append(transaction) def pop_transaction(self): return self._state.transactions.pop() def transaction_depth(self): return len(self._state.transactions) def top_transaction(self): if self._state.transactions: return self._state.transactions[-1] def atomic(self, *args, **kwargs): return _atomic(self, *args, **kwargs) def manual_commit(self): return _manual(self) def transaction(self, *args, **kwargs): return _transaction(self, *args, **kwargs) def savepoint(self): return _savepoint(self) def begin(self): if self.is_closed(): self.connect() def commit(self): with __exception_wrapper__: return self._state.conn.commit() def rollback(self): with __exception_wrapper__: return self._state.conn.rollback() def batch_commit(self, it, n): for group in chunked(it, n): with self.atomic(): for obj in group: yield obj def table_exists(self, table_name, schema=None): return table_name in self.get_tables(schema=schema) def get_tables(self, schema=None): raise NotImplementedError def get_indexes(self, table, schema=None): raise NotImplementedError def get_columns(self, table, schema=None): raise NotImplementedError def get_primary_keys(self, table, 
schema=None): raise NotImplementedError def get_foreign_keys(self, table, schema=None): raise NotImplementedError def sequence_exists(self, seq): raise NotImplementedError def create_tables(self, models, **options): for model in sort_models(models): model.create_table(**options) def drop_tables(self, models, **kwargs): for model in reversed(sort_models(models)): model.drop_table(**kwargs) def extract_date(self, date_part, date_field): raise NotImplementedError def truncate_date(self, date_part, date_field): raise NotImplementedError def to_timestamp(self, date_field): raise NotImplementedError def from_timestamp(self, date_field): raise NotImplementedError def random(self): return fn.random() def bind(self, models, bind_refs=True, bind_backrefs=True): for model in models: model.bind(self, bind_refs=bind_refs, bind_backrefs=bind_backrefs) def bind_ctx(self, models, bind_refs=True, bind_backrefs=True): return _BoundModelsContext(models, self, bind_refs, bind_backrefs) def get_noop_select(self, ctx): return ctx.sql(Select().columns(SQL('0')).where(SQL('0'))) def __pragma__(name): def __get__(self): return self.pragma(name) def __set__(self, value): return self.pragma(name, value) return property(__get__, __set__) class SqliteDatabase(Database): field_types = { 'BIGAUTO': FIELD.AUTO, 'BIGINT': FIELD.INT, 'BOOL': FIELD.INT, 'DOUBLE': FIELD.FLOAT, 'SMALLINT': FIELD.INT, 'UUID': FIELD.TEXT} operations = { 'LIKE': 'GLOB', 'ILIKE': 'LIKE'} index_schema_prefix = True limit_max = -1 server_version = __sqlite_version__ truncate_table = False def __init__(self, database, *args, **kwargs): self._pragmas = kwargs.pop('pragmas', ()) super(SqliteDatabase, self).__init__(database, *args, **kwargs) self._aggregates = {} self._collations = {} self._functions = {} self._window_functions = {} self._table_functions = [] self._extensions = set() self._attached = {} self.register_function(_sqlite_date_part, 'date_part', 2) self.register_function(_sqlite_date_trunc, 'date_trunc', 2) self.nulls_ordering = self.server_version >= (3, 30, 0) def init(self, database, pragmas=None, timeout=5, **kwargs): if pragmas is not None: self._pragmas = pragmas if isinstance(self._pragmas, dict): self._pragmas = list(self._pragmas.items()) self._timeout = timeout super(SqliteDatabase, self).init(database, **kwargs) def _set_server_version(self, conn): pass def _connect(self): if sqlite3 is None: raise ImproperlyConfigured('SQLite driver not installed!') conn = sqlite3.connect(self.database, timeout=self._timeout, isolation_level=None, **self.connect_params) try: self._add_conn_hooks(conn) except: conn.close() raise return conn def _add_conn_hooks(self, conn): if self._attached: self._attach_databases(conn) if self._pragmas: self._set_pragmas(conn) self._load_aggregates(conn) self._load_collations(conn) self._load_functions(conn) if self.server_version >= (3, 25, 0): self._load_window_functions(conn) if self._table_functions: for table_function in self._table_functions: table_function.register(conn) if self._extensions: self._load_extensions(conn) def _set_pragmas(self, conn): cursor = conn.cursor() for pragma, value in self._pragmas: cursor.execute('PRAGMA %s = %s;' % (pragma, value)) cursor.close() def _attach_databases(self, conn): cursor = conn.cursor() for name, db in self._attached.items(): cursor.execute('ATTACH DATABASE "%s" AS "%s"' % (db, name)) cursor.close() def pragma(self, key, value=SENTINEL, permanent=False, schema=None): if schema is not None: key = '"%s".%s' % (schema, key) sql = 'PRAGMA %s' % key if value is not 
SENTINEL: sql += ' = %s' % (value or 0) if permanent: pragmas = dict(self._pragmas or ()) pragmas[key] = value self._pragmas = list(pragmas.items()) elif permanent: raise ValueError('Cannot specify a permanent pragma without value') row = self.execute_sql(sql).fetchone() if row: return row[0] cache_size = __pragma__('cache_size') foreign_keys = __pragma__('foreign_keys') journal_mode = __pragma__('journal_mode') journal_size_limit = __pragma__('journal_size_limit') mmap_size = __pragma__('mmap_size') page_size = __pragma__('page_size') read_uncommitted = __pragma__('read_uncommitted') synchronous = __pragma__('synchronous') wal_autocheckpoint = __pragma__('wal_autocheckpoint') @property def timeout(self): return self._timeout @timeout.setter def timeout(self, seconds): if self._timeout == seconds: return self._timeout = seconds if not self.is_closed(): # PySQLite multiplies user timeout by 1000, but the unit of the # timeout PRAGMA is actually milliseconds. self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000)) def _load_aggregates(self, conn): for name, (klass, num_params) in self._aggregates.items(): conn.create_aggregate(name, num_params, klass) def _load_collations(self, conn): for name, fn in self._collations.items(): conn.create_collation(name, fn) def _load_functions(self, conn): for name, (fn, num_params) in self._functions.items(): conn.create_function(name, num_params, fn) def _load_window_functions(self, conn): for name, (klass, num_params) in self._window_functions.items(): conn.create_window_function(name, num_params, klass) def register_aggregate(self, klass, name=None, num_params=-1): self._aggregates[name or klass.__name__.lower()] = (klass, num_params) if not self.is_closed(): self._load_aggregates(self.connection()) def aggregate(self, name=None, num_params=-1): def decorator(klass): self.register_aggregate(klass, name, num_params) return klass return decorator def register_collation(self, fn, name=None): name = name or fn.__name__ def _collation(*args): expressions = args + (SQL('collate %s' % name),) return NodeList(expressions) fn.collation = _collation self._collations[name] = fn if not self.is_closed(): self._load_collations(self.connection()) def collation(self, name=None): def decorator(fn): self.register_collation(fn, name) return fn return decorator def register_function(self, fn, name=None, num_params=-1): self._functions[name or fn.__name__] = (fn, num_params) if not self.is_closed(): self._load_functions(self.connection()) def func(self, name=None, num_params=-1): def decorator(fn): self.register_function(fn, name, num_params) return fn return decorator def register_window_function(self, klass, name=None, num_params=-1): name = name or klass.__name__.lower() self._window_functions[name] = (klass, num_params) if not self.is_closed(): self._load_window_functions(self.connection()) def window_function(self, name=None, num_params=-1): def decorator(klass): self.register_window_function(klass, name, num_params) return klass return decorator def register_table_function(self, klass, name=None): if name is not None: klass.name = name self._table_functions.append(klass) if not self.is_closed(): klass.register(self.connection()) def table_function(self, name=None): def decorator(klass): self.register_table_function(klass, name) return klass return decorator def unregister_aggregate(self, name): del(self._aggregates[name]) def unregister_collation(self, name): del(self._collations[name]) def unregister_function(self, name): del(self._functions[name]) def 
unregister_window_function(self, name): del(self._window_functions[name]) def unregister_table_function(self, name): for idx, klass in enumerate(self._table_functions): if klass.name == name: break else: return False self._table_functions.pop(idx) return True def _load_extensions(self, conn): conn.enable_load_extension(True) for extension in self._extensions: conn.load_extension(extension) def load_extension(self, extension): self._extensions.add(extension) if not self.is_closed(): conn = self.connection() conn.enable_load_extension(True) conn.load_extension(extension) def unload_extension(self, extension): self._extensions.remove(extension) def attach(self, filename, name): if name in self._attached: if self._attached[name] == filename: return False raise OperationalError('schema "%s" already attached.' % name) self._attached[name] = filename if not self.is_closed(): self.execute_sql('ATTACH DATABASE "%s" AS "%s"' % (filename, name)) return True def detach(self, name): if name not in self._attached: return False del self._attached[name] if not self.is_closed(): self.execute_sql('DETACH DATABASE "%s"' % name) return True def begin(self, lock_type=None): statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN' self.execute_sql(statement, commit=False) def get_tables(self, schema=None): schema = schema or 'main' cursor = self.execute_sql('SELECT name FROM "%s".sqlite_master WHERE ' 'type=? ORDER BY name' % schema, ('table',)) return [row for row, in cursor.fetchall()] def get_views(self, schema=None): sql = ('SELECT name, sql FROM "%s".sqlite_master WHERE type=? ' 'ORDER BY name') % (schema or 'main') return [ViewMetadata(*row) for row in self.execute_sql(sql, ('view',))] def get_indexes(self, table, schema=None): schema = schema or 'main' query = ('SELECT name, sql FROM "%s".sqlite_master ' 'WHERE tbl_name = ? AND type = ? ORDER BY name') % schema cursor = self.execute_sql(query, (table, 'index')) index_to_sql = dict(cursor.fetchall()) # Determine which indexes have a unique constraint. unique_indexes = set() cursor = self.execute_sql('PRAGMA "%s".index_list("%s")' % (schema, table)) for row in cursor.fetchall(): name = row[1] is_unique = int(row[2]) == 1 if is_unique: unique_indexes.add(name) # Retrieve the indexed columns. 
index_columns = {} for index_name in sorted(index_to_sql): cursor = self.execute_sql('PRAGMA "%s".index_info("%s")' % (schema, index_name)) index_columns[index_name] = [row[2] for row in cursor.fetchall()] return [ IndexMetadata( name, index_to_sql[name], index_columns[name], name in unique_indexes, table) for name in sorted(index_to_sql)] def get_columns(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' % (schema or 'main', table)) return [ColumnMetadata(r[1], r[2], not r[3], bool(r[5]), table, r[4]) for r in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' % (schema or 'main', table)) return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())] def get_foreign_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".foreign_key_list("%s")' % (schema or 'main', table)) return [ForeignKeyMetadata(row[3], row[2], row[4], table) for row in cursor.fetchall()] def get_binary_type(self): return sqlite3.Binary def conflict_statement(self, on_conflict, query): action = on_conflict._action.lower() if on_conflict._action else '' if action and action not in ('nothing', 'update'): return SQL('INSERT OR %s' % on_conflict._action.upper()) def conflict_update(self, oc, query): # Sqlite prior to 3.24.0 does not support Postgres-style upsert. if self.server_version < (3, 24, 0) and \ any((oc._preserve, oc._update, oc._where, oc._conflict_target, oc._conflict_constraint)): raise ValueError('SQLite does not support specifying which values ' 'to preserve or update.') action = oc._action.lower() if oc._action else '' if action and action not in ('nothing', 'update', ''): return if action == 'nothing': return SQL('ON CONFLICT DO NOTHING') elif not oc._update and not oc._preserve: raise ValueError('If you are not performing any updates (or ' 'preserving any INSERTed values), then the ' 'conflict resolution action should be set to ' '"NOTHING".') elif oc._conflict_constraint: raise ValueError('SQLite does not support specifying named ' 'constraints for conflict resolution.') elif not oc._conflict_target: raise ValueError('SQLite requires that a conflict target be ' 'specified when doing an upsert.') return self._build_on_conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.date_part(date_part, date_field, python_value=int) def truncate_date(self, date_part, date_field): return fn.date_trunc(date_part, date_field, python_value=simple_date_time) def to_timestamp(self, date_field): return fn.strftime('%s', date_field).cast('integer') def from_timestamp(self, date_field): return fn.datetime(date_field, 'unixepoch') class PostgresqlDatabase(Database): field_types = { 'AUTO': 'SERIAL', 'BIGAUTO': 'BIGSERIAL', 'BLOB': 'BYTEA', 'BOOL': 'BOOLEAN', 'DATETIME': 'TIMESTAMP', 'DECIMAL': 'NUMERIC', 'DOUBLE': 'DOUBLE PRECISION', 'UUID': 'UUID', 'UUIDB': 'BYTEA'} operations = {'REGEXP': '~', 'IREGEXP': '~*'} param = '%s' commit_select = True compound_select_parentheses = CSQ_PARENTHESES_ALWAYS for_update = True nulls_ordering = True returning_clause = True safe_create_index = False sequences = True def init(self, database, register_unicode=True, encoding=None, isolation_level=None, **kwargs): self._register_unicode = register_unicode self._encoding = encoding self._isolation_level = isolation_level super(PostgresqlDatabase, self).init(database, **kwargs) def _connect(self): if psycopg2 is None: raise ImproperlyConfigured('Postgres driver not installed!') conn = 
psycopg2.connect(database=self.database, **self.connect_params) if self._register_unicode: pg_extensions.register_type(pg_extensions.UNICODE, conn) pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn) if self._encoding: conn.set_client_encoding(self._encoding) if self._isolation_level: conn.set_isolation_level(self._isolation_level) return conn def _set_server_version(self, conn): self.server_version = conn.server_version if self.server_version >= 90600: self.safe_create_index = True def is_connection_usable(self): if self._state.closed: return False # Returns True if we are idle, running a command, or in an active # connection. If the connection is in an error state or the connection # is otherwise unusable, return False. txn_status = self._state.conn.get_transaction_status() return txn_status < pg_extensions.TRANSACTION_STATUS_INERROR def last_insert_id(self, cursor, query_type=None): try: return cursor if query_type != Insert.SIMPLE else cursor[0][0] except (IndexError, KeyError, TypeError): pass def get_tables(self, schema=None): query = ('SELECT tablename FROM pg_catalog.pg_tables ' 'WHERE schemaname = %s ORDER BY tablename') cursor = self.execute_sql(query, (schema or 'public',)) return [table for table, in cursor.fetchall()] def get_views(self, schema=None): query = ('SELECT viewname, definition FROM pg_catalog.pg_views ' 'WHERE schemaname = %s ORDER BY viewname') cursor = self.execute_sql(query, (schema or 'public',)) return [ViewMetadata(view_name, sql.strip(' \t;')) for (view_name, sql) in cursor.fetchall()] def get_indexes(self, table, schema=None): query = """ SELECT i.relname, idxs.indexdef, idx.indisunique, array_to_string(ARRAY( SELECT pg_get_indexdef(idx.indexrelid, k + 1, TRUE) FROM generate_subscripts(idx.indkey, 1) AS k ORDER BY k), ',') FROM pg_catalog.pg_class AS t INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid INNER JOIN pg_catalog.pg_indexes AS idxs ON (idxs.tablename = t.relname AND idxs.indexname = i.relname) WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s ORDER BY idx.indisunique DESC, i.relname;""" cursor = self.execute_sql(query, (table, 'r', schema or 'public')) return [IndexMetadata(name, sql.rstrip(' ;'), columns.split(','), is_unique, table) for name, sql, is_unique, columns in cursor.fetchall()] def get_columns(self, table, schema=None): query = """ SELECT column_name, is_nullable, data_type, column_default FROM information_schema.columns WHERE table_name = %s AND table_schema = %s ORDER BY ordinal_position""" cursor = self.execute_sql(query, (table, schema or 'public')) pks = set(self.get_primary_keys(table, schema)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df) for name, null, dt, df in cursor.fetchall()] def get_primary_keys(self, table, schema=None): query = """ SELECT kc.column_name FROM information_schema.table_constraints AS tc INNER JOIN information_schema.key_column_usage AS kc ON ( tc.table_name = kc.table_name AND tc.table_schema = kc.table_schema AND tc.constraint_name = kc.constraint_name) WHERE tc.constraint_type = %s AND tc.table_name = %s AND tc.table_schema = %s""" ctype = 'PRIMARY KEY' cursor = self.execute_sql(query, (ctype, table, schema or 'public')) return [pk for pk, in cursor.fetchall()] def get_foreign_keys(self, table, schema=None): sql = """ SELECT DISTINCT kcu.column_name, ccu.table_name, ccu.column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON 
(tc.constraint_name = kcu.constraint_name AND tc.constraint_schema = kcu.constraint_schema AND tc.table_name = kcu.table_name AND tc.table_schema = kcu.table_schema) JOIN information_schema.constraint_column_usage AS ccu ON (ccu.constraint_name = tc.constraint_name AND ccu.constraint_schema = tc.constraint_schema) WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = %s AND tc.table_schema = %s""" cursor = self.execute_sql(sql, (table, schema or 'public')) return [ForeignKeyMetadata(row[0], row[1], row[2], table) for row in cursor.fetchall()] def sequence_exists(self, sequence): res = self.execute_sql(""" SELECT COUNT(*) FROM pg_class, pg_namespace WHERE relkind='S' AND pg_class.relnamespace = pg_namespace.oid AND relname=%s""", (sequence,)) return bool(res.fetchone()[0]) def get_binary_type(self): return psycopg2.Binary def conflict_statement(self, on_conflict, query): return def conflict_update(self, oc, query): action = oc._action.lower() if oc._action else '' if action in ('ignore', 'nothing'): parts = [SQL('ON CONFLICT')] if oc._conflict_target: parts.append(EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in oc._conflict_target])) parts.append(SQL('DO NOTHING')) return NodeList(parts) elif action and action != 'update': raise ValueError('The only supported actions for conflict ' 'resolution with Postgresql are "ignore" or ' '"update".') elif not oc._update and not oc._preserve: raise ValueError('If you are not performing any updates (or ' 'preserving any INSERTed values), then the ' 'conflict resolution action should be set to ' '"IGNORE".') elif not (oc._conflict_target or oc._conflict_constraint): raise ValueError('Postgres requires that a conflict target be ' 'specified when doing an upsert.') return self._build_on_conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((date_part, SQL('FROM'), date_field))) def truncate_date(self, date_part, date_field): return fn.DATE_TRUNC(date_part, date_field) def to_timestamp(self, date_field): return self.extract_date('EPOCH', date_field) def from_timestamp(self, date_field): # Ironically, here, Postgres means "to the Postgresql timestamp type". 
return fn.to_timestamp(date_field) def get_noop_select(self, ctx): return ctx.sql(Select().columns(SQL('0')).where(SQL('false'))) def set_time_zone(self, timezone): self.execute_sql('set time zone "%s";' % timezone) class MySQLDatabase(Database): field_types = { 'AUTO': 'INTEGER AUTO_INCREMENT', 'BIGAUTO': 'BIGINT AUTO_INCREMENT', 'BOOL': 'BOOL', 'DECIMAL': 'NUMERIC', 'DOUBLE': 'DOUBLE PRECISION', 'FLOAT': 'FLOAT', 'UUID': 'VARCHAR(40)', 'UUIDB': 'VARBINARY(16)'} operations = { 'LIKE': 'LIKE BINARY', 'ILIKE': 'LIKE', 'REGEXP': 'REGEXP BINARY', 'IREGEXP': 'REGEXP', 'XOR': 'XOR'} param = '%s' quote = '``' commit_select = True compound_select_parentheses = CSQ_PARENTHESES_UNNESTED for_update = True index_using_precedes_table = True limit_max = 2 ** 64 - 1 safe_create_index = False safe_drop_index = False sql_mode = 'PIPES_AS_CONCAT' def init(self, database, **kwargs): params = { 'charset': 'utf8', 'sql_mode': self.sql_mode, 'use_unicode': True} params.update(kwargs) if 'password' in params and mysql_passwd: params['passwd'] = params.pop('password') super(MySQLDatabase, self).init(database, **params) def _connect(self): if mysql is None: raise ImproperlyConfigured('MySQL driver not installed!') conn = mysql.connect(db=self.database, **self.connect_params) return conn def _set_server_version(self, conn): try: version_raw = conn.server_version except AttributeError: version_raw = conn.get_server_info() self.server_version = self._extract_server_version(version_raw) def _extract_server_version(self, version): version = version.lower() if 'maria' in version: match_obj = re.search(r'(1\d\.\d+\.\d+)', version) else: match_obj = re.search(r'(\d\.\d+\.\d+)', version) if match_obj is not None: return tuple(int(num) for num in match_obj.groups()[0].split('.')) warnings.warn('Unable to determine MySQL version: "%s"' % version) return (0, 0, 0) # Unable to determine version! 
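    # Illustrative sketch, not part of the original source: the version tuple
    # parsed above is what the MySQL conflict_update() method below consults
    # when choosing between MariaDB's VALUE() and MySQL's VALUES() in upserts.
    # A rough usage sketch; the database name and credentials are hypothetical:
    #
    #   db = MySQLDatabase('my_app', user='app', password='secret')
    #   db.connect()                      # server version is detected on connect
    #   print(db.server_version)          # e.g. (8, 0, 32) or (10, 6, 12) for MariaDB
    #   print(db._extract_server_version('5.7.42-log'))   # -> (5, 7, 42)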
def default_values_insert(self, ctx): return ctx.literal('() VALUES ()') def get_tables(self, schema=None): query = ('SELECT table_name FROM information_schema.tables ' 'WHERE table_schema = DATABASE() AND table_type != %s ' 'ORDER BY table_name') return [table for table, in self.execute_sql(query, ('VIEW',))] def get_views(self, schema=None): query = ('SELECT table_name, view_definition ' 'FROM information_schema.views ' 'WHERE table_schema = DATABASE() ORDER BY table_name') cursor = self.execute_sql(query) return [ViewMetadata(*row) for row in cursor.fetchall()] def get_indexes(self, table, schema=None): cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table) unique = set() indexes = {} for row in cursor.fetchall(): if not row[1]: unique.add(row[2]) indexes.setdefault(row[2], []) indexes[row[2]].append(row[4]) return [IndexMetadata(name, None, indexes[name], name in unique, table) for name in indexes] def get_columns(self, table, schema=None): sql = """ SELECT column_name, is_nullable, data_type, column_default FROM information_schema.columns WHERE table_name = %s AND table_schema = DATABASE()""" cursor = self.execute_sql(sql, (table,)) pks = set(self.get_primary_keys(table)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df) for name, null, dt, df in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table) return [row[4] for row in filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())] def get_foreign_keys(self, table, schema=None): query = """ SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""" cursor = self.execute_sql(query, (table,)) return [ ForeignKeyMetadata(column, dest_table, dest_column, table) for column, dest_table, dest_column in cursor.fetchall()] def get_binary_type(self): return mysql.Binary def conflict_statement(self, on_conflict, query): if not on_conflict._action: return action = on_conflict._action.lower() if action == 'replace': return SQL('REPLACE') elif action == 'ignore': return SQL('INSERT IGNORE') elif action != 'update': raise ValueError('Un-supported action for conflict resolution. ' 'MySQL supports REPLACE, IGNORE and UPDATE.') def conflict_update(self, on_conflict, query): if on_conflict._where or on_conflict._conflict_target or \ on_conflict._conflict_constraint: raise ValueError('MySQL does not support the specification of ' 'where clauses or conflict targets for conflict ' 'resolution.') updates = [] if on_conflict._preserve: # Here we need to determine which function to use, which varies # depending on the MySQL server version. MySQL and MariaDB prior to # 10.3.3 use "VALUES", while MariaDB 10.3.3+ use "VALUE". version = self.server_version or (0,) if version[0] == 10 and version >= (10, 3, 3): VALUE_FN = fn.VALUE else: VALUE_FN = fn.VALUES for column in on_conflict._preserve: entity = ensure_entity(column) expression = NodeList(( ensure_entity(column), SQL('='), VALUE_FN(entity))) updates.append(expression) if on_conflict._update: for k, v in on_conflict._update.items(): if not isinstance(v, Node): # Attempt to resolve string field-names to their respective # field object, to apply data-type conversions. 
if isinstance(k, basestring): k = getattr(query.table, k) if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) updates.append(NodeList((ensure_entity(k), SQL('='), v))) if updates: return NodeList((SQL('ON DUPLICATE KEY UPDATE'), CommaNodeList(updates))) def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field))) def truncate_date(self, date_part, date_field): return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part], python_value=simple_date_time) def to_timestamp(self, date_field): return fn.UNIX_TIMESTAMP(date_field) def from_timestamp(self, date_field): return fn.FROM_UNIXTIME(date_field) def random(self): return fn.rand() def get_noop_select(self, ctx): return ctx.literal('DO 0') # TRANSACTION CONTROL. class _manual(_callable_context_manager): def __init__(self, db): self.db = db def __enter__(self): top = self.db.top_transaction() if top is not None and not isinstance(top, _manual): raise ValueError('Cannot enter manual commit block while a ' 'transaction is active.') self.db.push_transaction(self) def __exit__(self, exc_type, exc_val, exc_tb): if self.db.pop_transaction() is not self: raise ValueError('Transaction stack corrupted while exiting ' 'manual commit block.') class _atomic(_callable_context_manager): def __init__(self, db, *args, **kwargs): self.db = db self._transaction_args = (args, kwargs) def __enter__(self): if self.db.transaction_depth() == 0: args, kwargs = self._transaction_args self._helper = self.db.transaction(*args, **kwargs) elif isinstance(self.db.top_transaction(), _manual): raise ValueError('Cannot enter atomic commit block while in ' 'manual commit mode.') else: self._helper = self.db.savepoint() return self._helper.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): return self._helper.__exit__(exc_type, exc_val, exc_tb) class _transaction(_callable_context_manager): def __init__(self, db, *args, **kwargs): self.db = db self._begin_args = (args, kwargs) def _begin(self): args, kwargs = self._begin_args self.db.begin(*args, **kwargs) def commit(self, begin=True): self.db.commit() if begin: self._begin() def rollback(self, begin=True): self.db.rollback() if begin: self._begin() def __enter__(self): if self.db.transaction_depth() == 0: self._begin() self.db.push_transaction(self) return self def __exit__(self, exc_type, exc_val, exc_tb): try: if exc_type: self.rollback(False) elif self.db.transaction_depth() == 1: try: self.commit(False) except: self.rollback(False) raise finally: self.db.pop_transaction() class _savepoint(_callable_context_manager): def __init__(self, db, sid=None): self.db = db self.sid = sid or 's' + uuid.uuid4().hex self.quoted_sid = self.sid.join(self.db.quote) def _begin(self): self.db.execute_sql('SAVEPOINT %s;' % self.quoted_sid) def commit(self, begin=True): self.db.execute_sql('RELEASE SAVEPOINT %s;' % self.quoted_sid) if begin: self._begin() def rollback(self): self.db.execute_sql('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid) def __enter__(self): self._begin() return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type: self.rollback() else: try: self.commit(begin=False) except: self.rollback() raise # CURSOR REPRESENTATIONS. 
class CursorWrapper(object): def __init__(self, cursor): self.cursor = cursor self.count = 0 self.index = 0 self.initialized = False self.populated = False self.row_cache = [] def __iter__(self): if self.populated: return iter(self.row_cache) return ResultIterator(self) def __getitem__(self, item): if isinstance(item, slice): stop = item.stop if stop is None or stop < 0: self.fill_cache() else: self.fill_cache(stop) return self.row_cache[item] elif isinstance(item, int): self.fill_cache(item if item > 0 else 0) return self.row_cache[item] else: raise ValueError('CursorWrapper only supports integer and slice ' 'indexes.') def __len__(self): self.fill_cache() return self.count def initialize(self): pass def iterate(self, cache=True): row = self.cursor.fetchone() if row is None: self.populated = True self.cursor.close() raise StopIteration elif not self.initialized: self.initialize() # Lazy initialization. self.initialized = True self.count += 1 result = self.process_row(row) if cache: self.row_cache.append(result) return result def process_row(self, row): return row def iterator(self): """Efficient one-pass iteration over the result set.""" while True: try: yield self.iterate(False) except StopIteration: return def fill_cache(self, n=0): n = n or float('Inf') if n < 0: raise ValueError('Negative values are not supported.') iterator = ResultIterator(self) iterator.index = self.count while not self.populated and (n > self.count): try: iterator.next() except StopIteration: break class DictCursorWrapper(CursorWrapper): def _initialize_columns(self): description = self.cursor.description self.columns = [t[0][t[0].find('.') + 1:].strip('")') for t in description] self.ncols = len(description) initialize = _initialize_columns def _row_to_dict(self, row): result = {} for i in range(self.ncols): result.setdefault(self.columns[i], row[i]) # Do not overwrite. 
return result process_row = _row_to_dict class NamedTupleCursorWrapper(CursorWrapper): def initialize(self): description = self.cursor.description self.tuple_class = collections.namedtuple( 'Row', [col[0][col[0].find('.') + 1:].strip('"') for col in description]) def process_row(self, row): return self.tuple_class(*row) class ObjectCursorWrapper(DictCursorWrapper): def __init__(self, cursor, constructor): super(ObjectCursorWrapper, self).__init__(cursor) self.constructor = constructor def process_row(self, row): row_dict = self._row_to_dict(row) return self.constructor(**row_dict) class ResultIterator(object): def __init__(self, cursor_wrapper): self.cursor_wrapper = cursor_wrapper self.index = 0 def __iter__(self): return self def next(self): if self.index < self.cursor_wrapper.count: obj = self.cursor_wrapper.row_cache[self.index] elif not self.cursor_wrapper.populated: self.cursor_wrapper.iterate() obj = self.cursor_wrapper.row_cache[self.index] else: raise StopIteration self.index += 1 return obj __next__ = next # FIELDS class FieldAccessor(object): def __init__(self, model, field, name): self.model = model self.field = field self.name = name def __get__(self, instance, instance_type=None): if instance is not None: return instance.__data__.get(self.name) return self.field def __set__(self, instance, value): instance.__data__[self.name] = value instance._dirty.add(self.name) class ForeignKeyAccessor(FieldAccessor): def __init__(self, model, field, name): super(ForeignKeyAccessor, self).__init__(model, field, name) self.rel_model = field.rel_model def get_rel_instance(self, instance): value = instance.__data__.get(self.name) if value is not None or self.name in instance.__rel__: if self.name not in instance.__rel__ and self.field.lazy_load: obj = self.rel_model.get(self.field.rel_field == value) instance.__rel__[self.name] = obj return instance.__rel__.get(self.name, value) elif not self.field.null and self.field.lazy_load: raise self.rel_model.DoesNotExist return value def __get__(self, instance, instance_type=None): if instance is not None: return self.get_rel_instance(instance) return self.field def __set__(self, instance, obj): if isinstance(obj, self.rel_model): instance.__data__[self.name] = getattr(obj, self.field.rel_field.name) instance.__rel__[self.name] = obj else: fk_value = instance.__data__.get(self.name) instance.__data__[self.name] = obj if (obj != fk_value or obj is None) and \ self.name in instance.__rel__: del instance.__rel__[self.name] instance._dirty.add(self.name) class BackrefAccessor(object): def __init__(self, field): self.field = field self.model = field.rel_model self.rel_model = field.model def __get__(self, instance, instance_type=None): if instance is not None: dest = self.field.rel_field.name return (self.rel_model .select() .where(self.field == getattr(instance, dest))) return self class ObjectIdAccessor(object): """Gives direct access to the underlying id""" def __init__(self, field): self.field = field def __get__(self, instance, instance_type=None): if instance is not None: value = instance.__data__.get(self.field.name) # Pull the object-id from the related object if it is not set. 
if value is None and self.field.name in instance.__rel__: rel_obj = instance.__rel__[self.field.name] value = getattr(rel_obj, self.field.rel_field.name) return value return self.field def __set__(self, instance, value): setattr(instance, self.field.name, value) class Field(ColumnBase): _field_counter = 0 _order = 0 accessor_class = FieldAccessor auto_increment = False default_index_type = None field_type = 'DEFAULT' unpack = True def __init__(self, null=False, index=False, unique=False, column_name=None, default=None, primary_key=False, constraints=None, sequence=None, collation=None, unindexed=False, choices=None, help_text=None, verbose_name=None, index_type=None, db_column=None, _hidden=False): if db_column is not None: __deprecated__('"db_column" has been deprecated in favor of ' '"column_name" for Field objects.') column_name = db_column self.null = null self.index = index self.unique = unique self.column_name = column_name self.default = default self.primary_key = primary_key self.constraints = constraints # List of column constraints. self.sequence = sequence # Name of sequence, e.g. foo_id_seq. self.collation = collation self.unindexed = unindexed self.choices = choices self.help_text = help_text self.verbose_name = verbose_name self.index_type = index_type or self.default_index_type self._hidden = _hidden # Used internally for recovering the order in which Fields were defined # on the Model class. Field._field_counter += 1 self._order = Field._field_counter self._sort_key = (self.primary_key and 1 or 2), self._order def __hash__(self): return hash(self.name + '.' + self.model.__name__) def __repr__(self): if hasattr(self, 'model') and getattr(self, 'name', None): return '<%s: %s.%s>' % (type(self).__name__, self.model.__name__, self.name) return '<%s: (unbound)>' % type(self).__name__ def bind(self, model, name, set_attribute=True): self.model = model self.name = self.safe_name = name self.column_name = self.column_name or name if set_attribute: setattr(model, name, self.accessor_class(model, self, name)) @property def column(self): return Column(self.model._meta.table, self.column_name) def adapt(self, value): return value def db_value(self, value): return value if value is None else self.adapt(value) def python_value(self, value): return value if value is None else self.adapt(value) def to_value(self, value): return Value(value, self.db_value, unpack=False) def get_sort_key(self, ctx): return self._sort_key def __sql__(self, ctx): return ctx.sql(self.column) def get_modifiers(self): pass def ddl_datatype(self, ctx): if ctx and ctx.state.field_types: column_type = ctx.state.field_types.get(self.field_type, self.field_type) else: column_type = self.field_type modifiers = self.get_modifiers() if column_type and modifiers: modifier_literal = ', '.join([str(m) for m in modifiers]) return SQL('%s(%s)' % (column_type, modifier_literal)) else: return SQL(column_type) def ddl(self, ctx): accum = [Entity(self.column_name)] data_type = self.ddl_datatype(ctx) if data_type: accum.append(data_type) if self.unindexed: accum.append(SQL('UNINDEXED')) if not self.null: accum.append(SQL('NOT NULL')) if self.primary_key: accum.append(SQL('PRIMARY KEY')) if self.sequence: accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence)) if self.constraints: accum.extend(self.constraints) if self.collation: accum.append(SQL('COLLATE %s' % self.collation)) return NodeList(accum) class IntegerField(Field): field_type = 'INT' def adapt(self, value): try: return int(value) except ValueError: return value class 
BigIntegerField(IntegerField): field_type = 'BIGINT' class SmallIntegerField(IntegerField): field_type = 'SMALLINT' class AutoField(IntegerField): auto_increment = True field_type = 'AUTO' def __init__(self, *args, **kwargs): if kwargs.get('primary_key') is False: raise ValueError('%s must always be a primary key.' % type(self)) kwargs['primary_key'] = True super(AutoField, self).__init__(*args, **kwargs) class BigAutoField(AutoField): field_type = 'BIGAUTO' class IdentityField(AutoField): field_type = 'INT GENERATED BY DEFAULT AS IDENTITY' def __init__(self, generate_always=False, **kwargs): if generate_always: self.field_type = 'INT GENERATED ALWAYS AS IDENTITY' super(IdentityField, self).__init__(**kwargs) class PrimaryKeyField(AutoField): def __init__(self, *args, **kwargs): __deprecated__('"PrimaryKeyField" has been renamed to "AutoField". ' 'Please update your code accordingly as this will be ' 'completely removed in a subsequent release.') super(PrimaryKeyField, self).__init__(*args, **kwargs) class FloatField(Field): field_type = 'FLOAT' def adapt(self, value): try: return float(value) except ValueError: return value class DoubleField(FloatField): field_type = 'DOUBLE' class DecimalField(Field): field_type = 'DECIMAL' def __init__(self, max_digits=10, decimal_places=5, auto_round=False, rounding=None, *args, **kwargs): self.max_digits = max_digits self.decimal_places = decimal_places self.auto_round = auto_round self.rounding = rounding or decimal.DefaultContext.rounding self._exp = decimal.Decimal(10) ** (-self.decimal_places) super(DecimalField, self).__init__(*args, **kwargs) def get_modifiers(self): return [self.max_digits, self.decimal_places] def db_value(self, value): D = decimal.Decimal if not value: return value if value is None else D(0) if self.auto_round: decimal_value = D(text_type(value)) return decimal_value.quantize(self._exp, rounding=self.rounding) return value def python_value(self, value): if value is not None: if isinstance(value, decimal.Decimal): return value return decimal.Decimal(text_type(value)) class _StringField(Field): def adapt(self, value): if isinstance(value, text_type): return value elif isinstance(value, bytes_type): return value.decode('utf-8') return text_type(value) def __add__(self, other): return StringExpression(self, OP.CONCAT, other) def __radd__(self, other): return StringExpression(other, OP.CONCAT, self) class CharField(_StringField): field_type = 'VARCHAR' def __init__(self, max_length=255, *args, **kwargs): self.max_length = max_length super(CharField, self).__init__(*args, **kwargs) def get_modifiers(self): return self.max_length and [self.max_length] or None class FixedCharField(CharField): field_type = 'CHAR' def python_value(self, value): value = super(FixedCharField, self).python_value(value) if value: value = value.strip() return value class TextField(_StringField): field_type = 'TEXT' class BlobField(Field): field_type = 'BLOB' def _db_hook(self, database): if database is None: self._constructor = bytearray else: self._constructor = database.get_binary_type() def bind(self, model, name, set_attribute=True): self._constructor = bytearray if model._meta.database: if isinstance(model._meta.database, Proxy): model._meta.database.attach_callback(self._db_hook) else: self._db_hook(model._meta.database) # Attach a hook to the model metadata; in the event the database is # changed or set at run-time, we will be sure to apply our callback and # use the proper data-type for our database driver. 
model._meta._db_hooks.append(self._db_hook) return super(BlobField, self).bind(model, name, set_attribute) def db_value(self, value): if isinstance(value, text_type): value = value.encode('raw_unicode_escape') if isinstance(value, bytes_type): return self._constructor(value) return value class BitField(BitwiseMixin, BigIntegerField): def __init__(self, *args, **kwargs): kwargs.setdefault('default', 0) super(BitField, self).__init__(*args, **kwargs) self.__current_flag = 1 def flag(self, value=None): if value is None: value = self.__current_flag self.__current_flag <<= 1 else: self.__current_flag = value << 1 class FlagDescriptor(ColumnBase): def __init__(self, field, value): self._field = field self._value = value super(FlagDescriptor, self).__init__() def clear(self): return self._field.bin_and(~self._value) def set(self): return self._field.bin_or(self._value) def __get__(self, instance, instance_type=None): if instance is None: return self value = getattr(instance, self._field.name) or 0 return (value & self._value) != 0 def __set__(self, instance, is_set): if is_set not in (True, False): raise ValueError('Value must be either True or False') value = getattr(instance, self._field.name) or 0 if is_set: value |= self._value else: value &= ~self._value setattr(instance, self._field.name, value) def __sql__(self, ctx): return ctx.sql(self._field.bin_and(self._value) != 0) return FlagDescriptor(self, value) class BigBitFieldData(object): def __init__(self, instance, name): self.instance = instance self.name = name value = self.instance.__data__.get(self.name) if not value: value = bytearray() elif not isinstance(value, bytearray): value = bytearray(value) self._buffer = self.instance.__data__[self.name] = value def _ensure_length(self, idx): byte_num, byte_offset = divmod(idx, 8) cur_size = len(self._buffer) if cur_size <= byte_num: self._buffer.extend(b'\x00' * ((byte_num + 1) - cur_size)) return byte_num, byte_offset def set_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] |= (1 << byte_offset) def clear_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] &= ~(1 << byte_offset) def toggle_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] ^= (1 << byte_offset) return bool(self._buffer[byte_num] & (1 << byte_offset)) def is_set(self, idx): byte_num, byte_offset = self._ensure_length(idx) return bool(self._buffer[byte_num] & (1 << byte_offset)) def __repr__(self): return repr(self._buffer) class BigBitFieldAccessor(FieldAccessor): def __get__(self, instance, instance_type=None): if instance is None: return self.field return BigBitFieldData(instance, self.name) def __set__(self, instance, value): if isinstance(value, memoryview): value = value.tobytes() elif isinstance(value, buffer_type): value = bytes(value) elif isinstance(value, bytearray): value = bytes_type(value) elif isinstance(value, BigBitFieldData): value = bytes_type(value._buffer) elif isinstance(value, text_type): value = value.encode('utf-8') elif not isinstance(value, bytes_type): raise ValueError('Value must be either a bytes, memoryview or ' 'BigBitFieldData instance.') super(BigBitFieldAccessor, self).__set__(instance, value) class BigBitField(BlobField): accessor_class = BigBitFieldAccessor def __init__(self, *args, **kwargs): kwargs.setdefault('default', bytes_type) super(BigBitField, self).__init__(*args, **kwargs) def db_value(self, value): return bytes_type(value) if value is not None else value class 
UUIDField(Field): field_type = 'UUID' def db_value(self, value): if isinstance(value, basestring) and len(value) == 32: # Hex string. No transformation is necessary. return value elif isinstance(value, bytes) and len(value) == 16: # Allow raw binary representation. value = uuid.UUID(bytes=value) if isinstance(value, uuid.UUID): return value.hex try: return uuid.UUID(value).hex except: return value def python_value(self, value): if isinstance(value, uuid.UUID): return value return uuid.UUID(value) if value is not None else None class BinaryUUIDField(BlobField): field_type = 'UUIDB' def db_value(self, value): if isinstance(value, bytes) and len(value) == 16: # Raw binary value. No transformation is necessary. return self._constructor(value) elif isinstance(value, basestring) and len(value) == 32: # Allow hex string representation. value = uuid.UUID(hex=value) if isinstance(value, uuid.UUID): return self._constructor(value.bytes) elif value is not None: raise ValueError('value for binary UUID field must be UUID(), ' 'a hexadecimal string, or a bytes object.') def python_value(self, value): if isinstance(value, uuid.UUID): return value elif isinstance(value, memoryview): value = value.tobytes() elif value and not isinstance(value, bytes): value = bytes(value) return uuid.UUID(bytes=value) if value is not None else None def _date_part(date_part): def dec(self): return self.model._meta.database.extract_date(date_part, self) return dec def format_date_time(value, formats, post_process=None): post_process = post_process or (lambda x: x) for fmt in formats: try: return post_process(datetime.datetime.strptime(value, fmt)) except ValueError: pass return value def simple_date_time(value): try: return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S') except (TypeError, ValueError): return value class _BaseFormattedField(Field): formats = None def __init__(self, formats=None, *args, **kwargs): if formats is not None: self.formats = formats super(_BaseFormattedField, self).__init__(*args, **kwargs) class DateTimeField(_BaseFormattedField): field_type = 'DATETIME' formats = [ '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d', ] def adapt(self, value): if value and isinstance(value, basestring): return format_date_time(value, self.formats) return value def to_timestamp(self): return self.model._meta.database.to_timestamp(self) def truncate(self, part): return self.model._meta.database.truncate_date(part, self) year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) class DateField(_BaseFormattedField): field_type = 'DATE' formats = [ '%Y-%m-%d', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', ] def adapt(self, value): if value and isinstance(value, basestring): pp = lambda x: x.date() return format_date_time(value, self.formats, pp) elif value and isinstance(value, datetime.datetime): return value.date() return value def to_timestamp(self): return self.model._meta.database.to_timestamp(self) def truncate(self, part): return self.model._meta.database.truncate_date(part, self) year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) class TimeField(_BaseFormattedField): field_type = 'TIME' formats = [ '%H:%M:%S.%f', '%H:%M:%S', '%H:%M', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', ] def adapt(self, value): if value: if isinstance(value, basestring): pp = lambda x: x.time() return 
format_date_time(value, self.formats, pp) elif isinstance(value, datetime.datetime): return value.time() if value is not None and isinstance(value, datetime.timedelta): return (datetime.datetime.min + value).time() return value hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) def _timestamp_date_part(date_part): def dec(self): db = self.model._meta.database expr = ((self / Value(self.resolution, converter=False)) if self.resolution > 1 else self) return db.extract_date(date_part, db.from_timestamp(expr)) return dec class TimestampField(BigIntegerField): # Support second -> microsecond resolution. valid_resolutions = [10**i for i in range(7)] def __init__(self, *args, **kwargs): self.resolution = kwargs.pop('resolution', None) if not self.resolution: self.resolution = 1 elif self.resolution in range(2, 7): self.resolution = 10 ** self.resolution elif self.resolution not in self.valid_resolutions: raise ValueError('TimestampField resolution must be one of: %s' % ', '.join(str(i) for i in self.valid_resolutions)) self.ticks_to_microsecond = 1000000 // self.resolution self.utc = kwargs.pop('utc', False) or False dflt = datetime.datetime.utcnow if self.utc else datetime.datetime.now kwargs.setdefault('default', dflt) super(TimestampField, self).__init__(*args, **kwargs) def local_to_utc(self, dt): # Convert naive local datetime into naive UTC, e.g.: # 2019-03-01T12:00:00 (local=US/Central) -> 2019-03-01T18:00:00. # 2019-05-01T12:00:00 (local=US/Central) -> 2019-05-01T17:00:00. # 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00. return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6]) def utc_to_local(self, dt): # Convert a naive UTC datetime into local time, e.g.: # 2019-03-01T18:00:00 (local=US/Central) -> 2019-03-01T12:00:00. # 2019-05-01T17:00:00 (local=US/Central) -> 2019-05-01T12:00:00. # 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00. ts = calendar.timegm(dt.utctimetuple()) return datetime.datetime.fromtimestamp(ts) def get_timestamp(self, value): if self.utc: # If utc-mode is on, then we assume all naive datetimes are in UTC. 
return calendar.timegm(value.utctimetuple()) else: return time.mktime(value.timetuple()) def db_value(self, value): if value is None: return if isinstance(value, datetime.datetime): pass elif isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) else: return int(round(value * self.resolution)) timestamp = self.get_timestamp(value) if self.resolution > 1: timestamp += (value.microsecond * .000001) timestamp *= self.resolution return int(round(timestamp)) def python_value(self, value): if value is not None and isinstance(value, (int, float, long)): if self.resolution > 1: value, ticks = divmod(value, self.resolution) microseconds = int(ticks * self.ticks_to_microsecond) else: microseconds = 0 if self.utc: value = datetime.datetime.utcfromtimestamp(value) else: value = datetime.datetime.fromtimestamp(value) if microseconds: value = value.replace(microsecond=microseconds) return value def from_timestamp(self): expr = ((self / Value(self.resolution, converter=False)) if self.resolution > 1 else self) return self.model._meta.database.from_timestamp(expr) year = property(_timestamp_date_part('year')) month = property(_timestamp_date_part('month')) day = property(_timestamp_date_part('day')) hour = property(_timestamp_date_part('hour')) minute = property(_timestamp_date_part('minute')) second = property(_timestamp_date_part('second')) class IPField(BigIntegerField): def db_value(self, val): if val is not None: return struct.unpack('!I', socket.inet_aton(val))[0] def python_value(self, val): if val is not None: return socket.inet_ntoa(struct.pack('!I', val)) class BooleanField(Field): field_type = 'BOOL' adapt = bool class BareField(Field): def __init__(self, adapt=None, *args, **kwargs): super(BareField, self).__init__(*args, **kwargs) if adapt is not None: self.adapt = adapt def ddl_datatype(self, ctx): return class ForeignKeyField(Field): accessor_class = ForeignKeyAccessor backref_accessor_class = BackrefAccessor def __init__(self, model, field=None, backref=None, on_delete=None, on_update=None, deferrable=None, _deferred=None, rel_model=None, to_field=None, object_id_name=None, lazy_load=True, constraint_name=None, related_name=None, *args, **kwargs): kwargs.setdefault('index', True) super(ForeignKeyField, self).__init__(*args, **kwargs) if rel_model is not None: __deprecated__('"rel_model" has been deprecated in favor of ' '"model" for ForeignKeyField objects.') model = rel_model if to_field is not None: __deprecated__('"to_field" has been deprecated in favor of ' '"field" for ForeignKeyField objects.') field = to_field if related_name is not None: __deprecated__('"related_name" has been deprecated in favor of ' '"backref" for Field objects.') backref = related_name self._is_self_reference = model == 'self' self.rel_model = model self.rel_field = field self.declared_backref = backref self.backref = None self.on_delete = on_delete self.on_update = on_update self.deferrable = deferrable self.deferred = _deferred self.object_id_name = object_id_name self.lazy_load = lazy_load self.constraint_name = constraint_name @property def field_type(self): if not isinstance(self.rel_field, AutoField): return self.rel_field.field_type elif isinstance(self.rel_field, BigAutoField): return BigIntegerField.field_type return IntegerField.field_type def get_modifiers(self): if not isinstance(self.rel_field, AutoField): return self.rel_field.get_modifiers() return super(ForeignKeyField, self).get_modifiers() def adapt(self, value): return self.rel_field.adapt(value) def 
db_value(self, value): if isinstance(value, self.rel_model): value = getattr(value, self.rel_field.name) return self.rel_field.db_value(value) def python_value(self, value): if isinstance(value, self.rel_model): return value return self.rel_field.python_value(value) def bind(self, model, name, set_attribute=True): if not self.column_name: self.column_name = name if name.endswith('_id') else name + '_id' if not self.object_id_name: self.object_id_name = self.column_name if self.object_id_name == name: self.object_id_name += '_id' elif self.object_id_name == name: raise ValueError('ForeignKeyField "%s"."%s" specifies an ' 'object_id_name that conflicts with its field ' 'name.' % (model._meta.name, name)) if self._is_self_reference: self.rel_model = model if isinstance(self.rel_field, basestring): self.rel_field = getattr(self.rel_model, self.rel_field) elif self.rel_field is None: self.rel_field = self.rel_model._meta.primary_key # Bind field before assigning backref, so field is bound when # calling declared_backref() (if callable). super(ForeignKeyField, self).bind(model, name, set_attribute) self.safe_name = self.object_id_name if callable_(self.declared_backref): self.backref = self.declared_backref(self) else: self.backref, self.declared_backref = self.declared_backref, None if not self.backref: self.backref = '%s_set' % model._meta.name if set_attribute: setattr(model, self.object_id_name, ObjectIdAccessor(self)) if self.backref not in '!+': setattr(self.rel_model, self.backref, self.backref_accessor_class(self)) def foreign_key_constraint(self): parts = [] if self.constraint_name: parts.extend((SQL('CONSTRAINT'), Entity(self.constraint_name))) parts.extend([ SQL('FOREIGN KEY'), EnclosedNodeList((self,)), SQL('REFERENCES'), self.rel_model, EnclosedNodeList((self.rel_field,))]) if self.on_delete: parts.append(SQL('ON DELETE %s' % self.on_delete)) if self.on_update: parts.append(SQL('ON UPDATE %s' % self.on_update)) if self.deferrable: parts.append(SQL('DEFERRABLE %s' % self.deferrable)) return NodeList(parts) def __getattr__(self, attr): if attr.startswith('__'): # Prevent recursion error when deep-copying. raise AttributeError('Cannot look-up non-existant "__" methods.') if attr in self.rel_model._meta.fields: return self.rel_model._meta.fields[attr] raise AttributeError('Foreign-key has no attribute %s, nor is it a ' 'valid field on the related model.' % attr) class DeferredForeignKey(Field): _unresolved = set() def __init__(self, rel_model_name, **kwargs): self.field_kwargs = kwargs self.rel_model_name = rel_model_name.lower() DeferredForeignKey._unresolved.add(self) super(DeferredForeignKey, self).__init__( column_name=kwargs.get('column_name'), null=kwargs.get('null'), primary_key=kwargs.get('primary_key')) __hash__ = object.__hash__ def __deepcopy__(self, memo=None): return DeferredForeignKey(self.rel_model_name, **self.field_kwargs) def set_model(self, rel_model): field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs) if field.primary_key: # NOTE: this calls add_field() under-the-hood. 
self.model._meta.set_primary_key(self.name, field) else: self.model._meta.add_field(self.name, field) @staticmethod def resolve(model_cls): unresolved = sorted(DeferredForeignKey._unresolved, key=operator.attrgetter('_order')) for dr in unresolved: if dr.rel_model_name == model_cls.__name__.lower(): dr.set_model(model_cls) DeferredForeignKey._unresolved.discard(dr) class DeferredThroughModel(object): def __init__(self): self._refs = [] def set_field(self, model, field, name): self._refs.append((model, field, name)) def set_model(self, through_model): for src_model, m2mfield, name in self._refs: m2mfield.through_model = through_model src_model._meta.add_field(name, m2mfield) class MetaField(Field): column_name = default = model = name = None primary_key = False class ManyToManyFieldAccessor(FieldAccessor): def __init__(self, model, field, name): super(ManyToManyFieldAccessor, self).__init__(model, field, name) self.model = field.model self.rel_model = field.rel_model self.through_model = field.through_model src_fks = self.through_model._meta.model_refs[self.model] dest_fks = self.through_model._meta.model_refs[self.rel_model] if not src_fks: raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' % (self.model, self.through_model)) elif not dest_fks: raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' % (self.rel_model, self.through_model)) self.src_fk = src_fks[0] self.dest_fk = dest_fks[0] def __get__(self, instance, instance_type=None, force_query=False): if instance is not None: if not force_query and self.src_fk.backref != '+': backref = getattr(instance, self.src_fk.backref) if isinstance(backref, list): return [getattr(obj, self.dest_fk.name) for obj in backref] src_id = getattr(instance, self.src_fk.rel_field.name) return (ManyToManyQuery(instance, self, self.rel_model) .join(self.through_model) .join(self.model) .where(self.src_fk == src_id)) return self.field def __set__(self, instance, value): query = self.__get__(instance, force_query=True) query.add(value, clear_existing=True) class ManyToManyField(MetaField): accessor_class = ManyToManyFieldAccessor def __init__(self, model, backref=None, through_model=None, on_delete=None, on_update=None, _is_backref=False): if through_model is not None: if not (isinstance(through_model, DeferredThroughModel) or is_model(through_model)): raise TypeError('Unexpected value for through_model. 
Expected ' 'Model or DeferredThroughModel.') if not _is_backref and (on_delete is not None or on_update is not None): raise ValueError('Cannot specify on_delete or on_update when ' 'through_model is specified.') self.rel_model = model self.backref = backref self._through_model = through_model self._on_delete = on_delete self._on_update = on_update self._is_backref = _is_backref def _get_descriptor(self): return ManyToManyFieldAccessor(self) def bind(self, model, name, set_attribute=True): if isinstance(self._through_model, DeferredThroughModel): self._through_model.set_field(model, self, name) return super(ManyToManyField, self).bind(model, name, set_attribute) if not self._is_backref: many_to_many_field = ManyToManyField( self.model, backref=name, through_model=self.through_model, on_delete=self._on_delete, on_update=self._on_update, _is_backref=True) self.backref = self.backref or model._meta.name + 's' self.rel_model._meta.add_field(self.backref, many_to_many_field) def get_models(self): return [model for _, model in sorted(( (self._is_backref, self.model), (not self._is_backref, self.rel_model)))] @property def through_model(self): if self._through_model is None: self._through_model = self._create_through_model() return self._through_model @through_model.setter def through_model(self, value): self._through_model = value def _create_through_model(self): lhs, rhs = self.get_models() tables = [model._meta.table_name for model in (lhs, rhs)] class Meta: database = self.model._meta.database schema = self.model._meta.schema table_name = '%s_%s_through' % tuple(tables) indexes = ( ((lhs._meta.name, rhs._meta.name), True),) params = {'on_delete': self._on_delete, 'on_update': self._on_update} attrs = { lhs._meta.name: ForeignKeyField(lhs, **params), rhs._meta.name: ForeignKeyField(rhs, **params), 'Meta': Meta} klass_name = '%s%sThrough' % (lhs.__name__, rhs.__name__) return type(klass_name, (Model,), attrs) def get_through_model(self): # XXX: Deprecated. Just use the "through_model" property. 
return self.through_model class VirtualField(MetaField): field_class = None def __init__(self, field_class=None, *args, **kwargs): Field = field_class if field_class is not None else self.field_class self.field_instance = Field() if Field is not None else None super(VirtualField, self).__init__(*args, **kwargs) def db_value(self, value): if self.field_instance is not None: return self.field_instance.db_value(value) return value def python_value(self, value): if self.field_instance is not None: return self.field_instance.python_value(value) return value def bind(self, model, name, set_attribute=True): self.model = model self.column_name = self.name = self.safe_name = name setattr(model, name, self.accessor_class(model, self, name)) class CompositeKey(MetaField): sequence = None def __init__(self, *field_names): self.field_names = field_names self._safe_field_names = None @property def safe_field_names(self): if self._safe_field_names is None: if self.model is None: return self.field_names self._safe_field_names = [self.model._meta.fields[f].safe_name for f in self.field_names] return self._safe_field_names def __get__(self, instance, instance_type=None): if instance is not None: return tuple([getattr(instance, f) for f in self.safe_field_names]) return self def __set__(self, instance, value): if not isinstance(value, (list, tuple)): raise TypeError('A list or tuple must be used to set the value of ' 'a composite primary key.') if len(value) != len(self.field_names): raise ValueError('The length of the value must equal the number ' 'of columns of the composite primary key.') for idx, field_value in enumerate(value): setattr(instance, self.field_names[idx], field_value) def __eq__(self, other): expressions = [(self.model._meta.fields[field] == value) for field, value in zip(self.field_names, other)] return reduce(operator.and_, expressions) def __ne__(self, other): return ~(self == other) def __hash__(self): return hash((self.model.__name__, self.field_names)) def __sql__(self, ctx): # If the composite PK is being selected, do not use parens. Elsewhere, # such as in an expression, we want to use parentheses and treat it as # a row value. 
parens = ctx.scope != SCOPE_SOURCE return ctx.sql(NodeList([self.model._meta.fields[field] for field in self.field_names], ', ', parens)) def bind(self, model, name, set_attribute=True): self.model = model self.column_name = self.name = self.safe_name = name setattr(model, self.name, self) class _SortedFieldList(object): __slots__ = ('_keys', '_items') def __init__(self): self._keys = [] self._items = [] def __getitem__(self, i): return self._items[i] def __iter__(self): return iter(self._items) def __contains__(self, item): k = item._sort_key i = bisect_left(self._keys, k) j = bisect_right(self._keys, k) return item in self._items[i:j] def index(self, field): return self._keys.index(field._sort_key) def insert(self, item): k = item._sort_key i = bisect_left(self._keys, k) self._keys.insert(i, k) self._items.insert(i, item) def remove(self, item): idx = self.index(item) del self._items[idx] del self._keys[idx] # MODELS class SchemaManager(object): def __init__(self, model, database=None, **context_options): self.model = model self._database = database context_options.setdefault('scope', SCOPE_VALUES) self.context_options = context_options @property def database(self): db = self._database or self.model._meta.database if db is None: raise ImproperlyConfigured('database attribute does not appear to ' 'be set on the model: %s' % self.model) return db @database.setter def database(self, value): self._database = value def _create_context(self): return self.database.get_sql_context(**self.context_options) def _create_table(self, safe=True, **options): is_temp = options.pop('temporary', False) ctx = self._create_context() ctx.literal('CREATE TEMPORARY TABLE ' if is_temp else 'CREATE TABLE ') if safe: ctx.literal('IF NOT EXISTS ') ctx.sql(self.model).literal(' ') columns = [] constraints = [] meta = self.model._meta if meta.composite_key: pk_columns = [meta.fields[field_name].column for field_name in meta.primary_key.field_names] constraints.append(NodeList((SQL('PRIMARY KEY'), EnclosedNodeList(pk_columns)))) for field in meta.sorted_fields: columns.append(field.ddl(ctx)) if isinstance(field, ForeignKeyField) and not field.deferred: constraints.append(field.foreign_key_constraint()) if meta.constraints: constraints.extend(meta.constraints) constraints.extend(self._create_table_option_sql(options)) ctx.sql(EnclosedNodeList(columns + constraints)) if meta.table_settings is not None: table_settings = ensure_tuple(meta.table_settings) for setting in table_settings: if not isinstance(setting, basestring): raise ValueError('table_settings must be strings') ctx.literal(' ').literal(setting) if meta.without_rowid: ctx.literal(' WITHOUT ROWID') return ctx def _create_table_option_sql(self, options): accum = [] options = merge_dict(self.model._meta.options or {}, options) if not options: return accum for key, value in sorted(options.items()): if not isinstance(value, Node): if is_model(value): value = value._meta.table else: value = SQL(str(value)) accum.append(NodeList((SQL(key), value), glue='=')) return accum def create_table(self, safe=True, **options): self.database.execute(self._create_table(safe=safe, **options)) def _create_table_as(self, table_name, query, safe=True, **meta): ctx = (self._create_context() .literal('CREATE TEMPORARY TABLE ' if meta.get('temporary') else 'CREATE TABLE ')) if safe: ctx.literal('IF NOT EXISTS ') return (ctx .sql(Entity(table_name)) .literal(' AS ') .sql(query)) def create_table_as(self, table_name, query, safe=True, **meta): ctx = self._create_table_as(table_name, 
query, safe=safe, **meta) self.database.execute(ctx) def _drop_table(self, safe=True, **options): ctx = (self._create_context() .literal('DROP TABLE IF EXISTS ' if safe else 'DROP TABLE ') .sql(self.model)) if options.get('cascade'): ctx = ctx.literal(' CASCADE') elif options.get('restrict'): ctx = ctx.literal(' RESTRICT') return ctx def drop_table(self, safe=True, **options): self.database.execute(self._drop_table(safe=safe, **options)) def _truncate_table(self, restart_identity=False, cascade=False): db = self.database if not db.truncate_table: return (self._create_context() .literal('DELETE FROM ').sql(self.model)) ctx = self._create_context().literal('TRUNCATE TABLE ').sql(self.model) if restart_identity: ctx = ctx.literal(' RESTART IDENTITY') if cascade: ctx = ctx.literal(' CASCADE') return ctx def truncate_table(self, restart_identity=False, cascade=False): self.database.execute(self._truncate_table(restart_identity, cascade)) def _create_indexes(self, safe=True): return [self._create_index(index, safe) for index in self.model._meta.fields_to_index()] def _create_index(self, index, safe=True): if isinstance(index, Index): if not self.database.safe_create_index: index = index.safe(False) elif index._safe != safe: index = index.safe(safe) return self._create_context().sql(index) def create_indexes(self, safe=True): for query in self._create_indexes(safe=safe): self.database.execute(query) def _drop_indexes(self, safe=True): return [self._drop_index(index, safe) for index in self.model._meta.fields_to_index() if isinstance(index, Index)] def _drop_index(self, index, safe): statement = 'DROP INDEX ' if safe and self.database.safe_drop_index: statement += 'IF EXISTS ' if isinstance(index._table, Table) and index._table._schema: index_name = Entity(index._table._schema, index._name) else: index_name = Entity(index._name) return (self ._create_context() .literal(statement) .sql(index_name)) def drop_indexes(self, safe=True): for query in self._drop_indexes(safe=safe): self.database.execute(query) def _check_sequences(self, field): if not field.sequence or not self.database.sequences: raise ValueError('Sequences are either not supported, or are not ' 'defined for "%s".' 
% field.name) def _sequence_for_field(self, field): if field.model._meta.schema: return Entity(field.model._meta.schema, field.sequence) else: return Entity(field.sequence) def _create_sequence(self, field): self._check_sequences(field) if not self.database.sequence_exists(field.sequence): return (self ._create_context() .literal('CREATE SEQUENCE ') .sql(self._sequence_for_field(field))) def create_sequence(self, field): seq_ctx = self._create_sequence(field) if seq_ctx is not None: self.database.execute(seq_ctx) def _drop_sequence(self, field): self._check_sequences(field) if self.database.sequence_exists(field.sequence): return (self ._create_context() .literal('DROP SEQUENCE ') .sql(self._sequence_for_field(field))) def drop_sequence(self, field): seq_ctx = self._drop_sequence(field) if seq_ctx is not None: self.database.execute(seq_ctx) def _create_foreign_key(self, field): name = 'fk_%s_%s_refs_%s' % (field.model._meta.table_name, field.column_name, field.rel_model._meta.table_name) return (self ._create_context() .literal('ALTER TABLE ') .sql(field.model) .literal(' ADD CONSTRAINT ') .sql(Entity(_truncate_constraint_name(name))) .literal(' ') .sql(field.foreign_key_constraint())) def create_foreign_key(self, field): self.database.execute(self._create_foreign_key(field)) def create_sequences(self): if self.database.sequences: for field in self.model._meta.sorted_fields: if field.sequence: self.create_sequence(field) def create_all(self, safe=True, **table_options): self.create_sequences() self.create_table(safe, **table_options) self.create_indexes(safe=safe) def drop_sequences(self): if self.database.sequences: for field in self.model._meta.sorted_fields: if field.sequence: self.drop_sequence(field) def drop_all(self, safe=True, drop_sequences=True, **options): self.drop_table(safe, **options) if drop_sequences: self.drop_sequences() class Metadata(object): def __init__(self, model, database=None, table_name=None, indexes=None, primary_key=None, constraints=None, schema=None, only_save_dirty=False, depends_on=None, options=None, db_table=None, table_function=None, table_settings=None, without_rowid=False, temporary=False, legacy_table_names=True, **kwargs): if db_table is not None: __deprecated__('"db_table" has been deprecated in favor of ' '"table_name" for Models.') table_name = db_table self.model = model self.database = database self.fields = {} self.columns = {} self.combined = {} self._sorted_field_list = _SortedFieldList() self.sorted_fields = [] self.sorted_field_names = [] self.defaults = {} self._default_by_name = {} self._default_dict = {} self._default_callables = {} self._default_callable_list = [] self.name = model.__name__.lower() self.table_function = table_function self.legacy_table_names = legacy_table_names if not table_name: table_name = (self.table_function(model) if self.table_function else self.make_table_name()) self.table_name = table_name self._table = None self.indexes = list(indexes) if indexes else [] self.constraints = constraints self._schema = schema self.primary_key = primary_key self.composite_key = self.auto_increment = None self.only_save_dirty = only_save_dirty self.depends_on = depends_on self.table_settings = table_settings self.without_rowid = without_rowid self.temporary = temporary self.refs = {} self.backrefs = {} self.model_refs = collections.defaultdict(list) self.model_backrefs = collections.defaultdict(list) self.manytomany = {} self.options = options or {} for key, value in kwargs.items(): setattr(self, key, value) 
self._additional_keys = set(kwargs.keys()) # Allow objects to register hooks that are called if the model is bound # to a different database. For example, BlobField uses a different # Python data-type depending on the db driver / python version. When # the database changes, we need to update any BlobField so they can use # the appropriate data-type. self._db_hooks = [] def make_table_name(self): if self.legacy_table_names: return re.sub(r'[^\w]+', '_', self.name) return make_snake_case(self.model.__name__) def model_graph(self, refs=True, backrefs=True, depth_first=True): if not refs and not backrefs: raise ValueError('One of `refs` or `backrefs` must be True.') accum = [(None, self.model, None)] seen = set() queue = collections.deque((self,)) method = queue.pop if depth_first else queue.popleft while queue: curr = method() if curr in seen: continue seen.add(curr) if refs: for fk, model in curr.refs.items(): accum.append((fk, model, False)) queue.append(model._meta) if backrefs: for fk, model in curr.backrefs.items(): accum.append((fk, model, True)) queue.append(model._meta) return accum def add_ref(self, field): rel = field.rel_model self.refs[field] = rel self.model_refs[rel].append(field) rel._meta.backrefs[field] = self.model rel._meta.model_backrefs[self.model].append(field) def remove_ref(self, field): rel = field.rel_model del self.refs[field] self.model_refs[rel].remove(field) del rel._meta.backrefs[field] rel._meta.model_backrefs[self.model].remove(field) def add_manytomany(self, field): self.manytomany[field.name] = field def remove_manytomany(self, field): del self.manytomany[field.name] @property def table(self): if self._table is None: self._table = Table( self.table_name, [field.column_name for field in self.sorted_fields], schema=self.schema, _model=self.model, _database=self.database) return self._table @table.setter def table(self, value): raise AttributeError('Cannot set the "table".') @table.deleter def table(self): self._table = None @property def schema(self): return self._schema @schema.setter def schema(self, value): self._schema = value del self.table @property def entity(self): if self._schema: return Entity(self._schema, self.table_name) else: return Entity(self.table_name) def _update_sorted_fields(self): self.sorted_fields = list(self._sorted_field_list) self.sorted_field_names = [f.name for f in self.sorted_fields] def get_rel_for_model(self, model): if isinstance(model, ModelAlias): model = model.model forwardrefs = self.model_refs.get(model, []) backrefs = self.model_backrefs.get(model, []) return (forwardrefs, backrefs) def add_field(self, field_name, field, set_attribute=True): if field_name in self.fields: self.remove_field(field_name) elif field_name in self.manytomany: self.remove_manytomany(self.manytomany[field_name]) if not isinstance(field, MetaField): del self.table field.bind(self.model, field_name, set_attribute) self.fields[field.name] = field self.columns[field.column_name] = field self.combined[field.name] = field self.combined[field.column_name] = field self._sorted_field_list.insert(field) self._update_sorted_fields() if field.default is not None: # This optimization helps speed up model instance construction. 
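                # Illustration (hypothetical fields, for exposition only):
                #
                #     class Note(Model):
                #         created = DateTimeField(default=datetime.datetime.now)  # callable
                #         counter = IntegerField(default=0)                       # plain value
                #
                # The callable default is tracked in _default_callables /
                # _default_callable_list and invoked per-instance by
                # get_default_dict(); the plain value is stored in
                # _default_dict / _default_by_name.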
self.defaults[field] = field.default if callable_(field.default): self._default_callables[field] = field.default self._default_callable_list.append((field.name, field.default)) else: self._default_dict[field] = field.default self._default_by_name[field.name] = field.default else: field.bind(self.model, field_name, set_attribute) if isinstance(field, ForeignKeyField): self.add_ref(field) elif isinstance(field, ManyToManyField) and field.name: self.add_manytomany(field) def remove_field(self, field_name): if field_name not in self.fields: return del self.table original = self.fields.pop(field_name) del self.columns[original.column_name] del self.combined[field_name] try: del self.combined[original.column_name] except KeyError: pass self._sorted_field_list.remove(original) self._update_sorted_fields() if original.default is not None: del self.defaults[original] if self._default_callables.pop(original, None): for i, (name, _) in enumerate(self._default_callable_list): if name == field_name: self._default_callable_list.pop(i) break else: self._default_dict.pop(original, None) self._default_by_name.pop(original.name, None) if isinstance(original, ForeignKeyField): self.remove_ref(original) def set_primary_key(self, name, field): self.composite_key = isinstance(field, CompositeKey) self.add_field(name, field) self.primary_key = field self.auto_increment = ( field.auto_increment or bool(field.sequence)) def get_primary_keys(self): if self.composite_key: return tuple([self.fields[field_name] for field_name in self.primary_key.field_names]) else: return (self.primary_key,) if self.primary_key is not False else () def get_default_dict(self): dd = self._default_by_name.copy() for field_name, default in self._default_callable_list: dd[field_name] = default() return dd def fields_to_index(self): indexes = [] for f in self.sorted_fields: if f.primary_key: continue if f.index or f.unique: indexes.append(ModelIndex(self.model, (f,), unique=f.unique, using=f.index_type)) for index_obj in self.indexes: if isinstance(index_obj, Node): indexes.append(index_obj) elif isinstance(index_obj, (list, tuple)): index_parts, unique = index_obj fields = [] for part in index_parts: if isinstance(part, basestring): fields.append(self.combined[part]) elif isinstance(part, Node): fields.append(part) else: raise ValueError('Expected either a field name or a ' 'subclass of Node. Got: %s' % part) indexes.append(ModelIndex(self.model, fields, unique=unique)) return indexes def set_database(self, database): self.database = database self.model._schema._database = database del self.table # Apply any hooks that have been registered. 
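        # Illustration (hypothetical, for exposition only): a field needing
        # driver-specific behavior can register for this notification during
        # bind(), e.g.
        #
        #     model._meta._db_hooks.append(my_callback)   # my_callback(database)
        #
        # so that its state is refreshed whenever the model is re-bound.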
for hook in self._db_hooks: hook(database) def set_table_name(self, table_name): self.table_name = table_name del self.table class SubclassAwareMetadata(Metadata): models = [] def __init__(self, model, *args, **kwargs): super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs) self.models.append(model) def map_models(self, fn): for model in self.models: fn(model) class DoesNotExist(Exception): pass class ModelBase(type): inheritable = set(['constraints', 'database', 'indexes', 'primary_key', 'options', 'schema', 'table_function', 'temporary', 'only_save_dirty', 'legacy_table_names', 'table_settings']) def __new__(cls, name, bases, attrs): if name == MODEL_BASE or bases[0].__name__ == MODEL_BASE: return super(ModelBase, cls).__new__(cls, name, bases, attrs) meta_options = {} meta = attrs.pop('Meta', None) if meta: for k, v in meta.__dict__.items(): if not k.startswith('_'): meta_options[k] = v pk = getattr(meta, 'primary_key', None) pk_name = parent_pk = None # Inherit any field descriptors by deep copying the underlying field # into the attrs of the new model, additionally see if the bases define # inheritable model options and swipe them. for b in bases: if not hasattr(b, '_meta'): continue base_meta = b._meta if parent_pk is None: parent_pk = deepcopy(base_meta.primary_key) all_inheritable = cls.inheritable | base_meta._additional_keys for k in base_meta.__dict__: if k in all_inheritable and k not in meta_options: meta_options[k] = base_meta.__dict__[k] meta_options.setdefault('schema', base_meta.schema) for (k, v) in b.__dict__.items(): if k in attrs: continue if isinstance(v, FieldAccessor) and not v.field.primary_key: attrs[k] = deepcopy(v.field) sopts = meta_options.pop('schema_options', None) or {} Meta = meta_options.get('model_metadata_class', Metadata) Schema = meta_options.get('schema_manager_class', SchemaManager) # Construct the new class. cls = super(ModelBase, cls).__new__(cls, name, bases, attrs) cls.__data__ = cls.__rel__ = None cls._meta = Meta(cls, **meta_options) cls._schema = Schema(cls, **sopts) fields = [] for key, value in cls.__dict__.items(): if isinstance(value, Field): if value.primary_key and pk: raise ValueError('over-determined primary key %s.' % name) elif value.primary_key: pk, pk_name = value, key else: fields.append((key, value)) if pk is None: if parent_pk is not False: pk, pk_name = ((parent_pk, parent_pk.name) if parent_pk is not None else (AutoField(), 'id')) else: pk = False elif isinstance(pk, CompositeKey): pk_name = '__composite_key__' cls._meta.composite_key = True if pk is not False: cls._meta.set_primary_key(pk_name, pk) for name, field in fields: cls._meta.add_field(name, field) # Create a repr and error class before finalizing. if hasattr(cls, '__str__') and '__repr__' not in attrs: setattr(cls, '__repr__', lambda self: '<%s: %s>' % ( cls.__name__, self.__str__())) exc_name = '%sDoesNotExist' % cls.__name__ exc_attrs = {'__module__': cls.__module__} exception_class = type(exc_name, (DoesNotExist,), exc_attrs) cls.DoesNotExist = exception_class # Call validation hook, allowing additional model validation. 
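        # Illustration (hypothetical subclass, for exposition only):
        #
        #     class BaseModel(Model):
        #         @classmethod
        #         def validate_model(cls):
        #             if not cls._meta.table_name.islower():
        #                 raise ValueError('table names must be lower-case')
        #
        # Raising here aborts construction of the offending model class.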
cls.validate_model() DeferredForeignKey.resolve(cls) return cls def __repr__(self): return '<Model: %s>' % self.__name__ def __iter__(self): return iter(self.select()) def __getitem__(self, key): return self.get_by_id(key) def __setitem__(self, key, value): self.set_by_id(key, value) def __delitem__(self, key): self.delete_by_id(key) def __contains__(self, key): try: self.get_by_id(key) except self.DoesNotExist: return False else: return True def __len__(self): return self.select().count() def __bool__(self): return True __nonzero__ = __bool__ # Python 2. def __sql__(self, ctx): return ctx.sql(self._meta.table) class _BoundModelsContext(_callable_context_manager): def __init__(self, models, database, bind_refs, bind_backrefs): self.models = models self.database = database self.bind_refs = bind_refs self.bind_backrefs = bind_backrefs def __enter__(self): self._orig_database = [] for model in self.models: self._orig_database.append(model._meta.database) model.bind(self.database, self.bind_refs, self.bind_backrefs, _exclude=set(self.models)) return self.models def __exit__(self, exc_type, exc_val, exc_tb): for model, db in zip(self.models, self._orig_database): model.bind(db, self.bind_refs, self.bind_backrefs, _exclude=set(self.models)) class Model(with_metaclass(ModelBase, Node)): def __init__(self, *args, **kwargs): if kwargs.pop('__no_default__', None): self.__data__ = {} else: self.__data__ = self._meta.get_default_dict() self._dirty = set(self.__data__) self.__rel__ = {} for k in kwargs: setattr(self, k, kwargs[k]) def __str__(self): return str(self._pk) if self._meta.primary_key is not False else 'n/a' @classmethod def validate_model(cls): pass @classmethod def alias(cls, alias=None): return ModelAlias(cls, alias) @classmethod def select(cls, *fields): is_default = not fields if not fields: fields = cls._meta.sorted_fields return ModelSelect(cls, fields, is_default=is_default) @classmethod def _normalize_data(cls, data, kwargs): normalized = {} if data: if not isinstance(data, dict): if kwargs: raise ValueError('Data cannot be mixed with keyword ' 'arguments: %s' % data) return data for key in data: try: field = (key if isinstance(key, Field) else cls._meta.combined[key]) except KeyError: if not isinstance(key, Node): raise ValueError('Unrecognized field name: "%s" in %s.' 
% (key, data)) field = key normalized[field] = data[key] if kwargs: for key in kwargs: try: normalized[cls._meta.combined[key]] = kwargs[key] except KeyError: normalized[getattr(cls, key)] = kwargs[key] return normalized @classmethod def update(cls, __data=None, **update): return ModelUpdate(cls, cls._normalize_data(__data, update)) @classmethod def insert(cls, __data=None, **insert): return ModelInsert(cls, cls._normalize_data(__data, insert)) @classmethod def insert_many(cls, rows, fields=None): return ModelInsert(cls, insert=rows, columns=fields) @classmethod def insert_from(cls, query, fields): columns = [getattr(cls, field) if isinstance(field, basestring) else field for field in fields] return ModelInsert(cls, insert=query, columns=columns) @classmethod def replace(cls, __data=None, **insert): return cls.insert(__data, **insert).on_conflict('REPLACE') @classmethod def replace_many(cls, rows, fields=None): return (cls .insert_many(rows=rows, fields=fields) .on_conflict('REPLACE')) @classmethod def raw(cls, sql, *params): return ModelRaw(cls, sql, params) @classmethod def delete(cls): return ModelDelete(cls) @classmethod def create(cls, **query): inst = cls(**query) inst.save(force_insert=True) return inst @classmethod def bulk_create(cls, model_list, batch_size=None): if batch_size is not None: batches = chunked(model_list, batch_size) else: batches = [model_list] field_names = list(cls._meta.sorted_field_names) if cls._meta.auto_increment: pk_name = cls._meta.primary_key.name field_names.remove(pk_name) if cls._meta.database.returning_clause and \ cls._meta.primary_key is not False: pk_fields = cls._meta.get_primary_keys() else: pk_fields = None fields = [cls._meta.fields[field_name] for field_name in field_names] attrs = [] for field in fields: if isinstance(field, ForeignKeyField): attrs.append(field.object_id_name) else: attrs.append(field.name) for batch in batches: accum = ([getattr(model, f) for f in attrs] for model in batch) res = cls.insert_many(accum, fields=fields).execute() if pk_fields and res is not None: for row, model in zip(res, batch): for (pk_field, obj_id) in zip(pk_fields, row): setattr(model, pk_field.name, obj_id) @classmethod def bulk_update(cls, model_list, fields, batch_size=None): if isinstance(cls._meta.primary_key, CompositeKey): raise ValueError('bulk_update() is not supported for models with ' 'a composite primary key.') # First normalize list of fields so all are field instances. fields = [cls._meta.fields[f] if isinstance(f, basestring) else f for f in fields] # Now collect list of attribute names to use for values. attrs = [field.object_id_name if isinstance(field, ForeignKeyField) else field.name for field in fields] if batch_size is not None: batches = chunked(model_list, batch_size) else: batches = [model_list] n = 0 pk = cls._meta.primary_key for batch in batches: id_list = [model._pk for model in batch] update = {} for field, attr in zip(fields, attrs): accum = [] for model in batch: value = getattr(model, attr) if not isinstance(value, Node): value = field.to_value(value) accum.append((pk.to_value(model._pk), value)) case = Case(pk, accum) update[field] = case n += (cls.update(update) .where(cls._meta.primary_key.in_(id_list)) .execute()) return n @classmethod def noop(cls): return NoopModelSelect(cls, ()) @classmethod def get(cls, *query, **filters): sq = cls.select() if query: # Handle simple lookup using just the primary key. 
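            # Illustration (hypothetical User model): User.get(1) is treated as
            # User.get(User.id == 1), while User.get(User.username == 'alice')
            # passes the expression straight through to where().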
if len(query) == 1 and isinstance(query[0], int): sq = sq.where(cls._meta.primary_key == query[0]) else: sq = sq.where(*query) if filters: sq = sq.filter(**filters) return sq.get() @classmethod def get_or_none(cls, *query, **filters): try: return cls.get(*query, **filters) except DoesNotExist: pass @classmethod def get_by_id(cls, pk): return cls.get(cls._meta.primary_key == pk) @classmethod def set_by_id(cls, key, value): if key is None: return cls.insert(value).execute() else: return (cls.update(value) .where(cls._meta.primary_key == key).execute()) @classmethod def delete_by_id(cls, pk): return cls.delete().where(cls._meta.primary_key == pk).execute() @classmethod def get_or_create(cls, **kwargs): defaults = kwargs.pop('defaults', {}) query = cls.select() for field, value in kwargs.items(): query = query.where(getattr(cls, field) == value) try: return query.get(), False except cls.DoesNotExist: try: if defaults: kwargs.update(defaults) with cls._meta.database.atomic(): return cls.create(**kwargs), True except IntegrityError as exc: try: return query.get(), False except cls.DoesNotExist: raise exc @classmethod def filter(cls, *dq_nodes, **filters): return cls.select().filter(*dq_nodes, **filters) def get_id(self): # Using getattr(self, pk-name) could accidentally trigger a query if # the primary-key is a foreign-key. So we use the safe_name attribute, # which defaults to the field-name, but will be the object_id_name for # foreign-key fields. if self._meta.primary_key is not False: return getattr(self, self._meta.primary_key.safe_name) _pk = property(get_id) @_pk.setter def _pk(self, value): setattr(self, self._meta.primary_key.name, value) def _pk_expr(self): return self._meta.primary_key == self._pk def _prune_fields(self, field_dict, only): new_data = {} for field in only: if isinstance(field, basestring): field = self._meta.combined[field] if field.name in field_dict: new_data[field.name] = field_dict[field.name] return new_data def _populate_unsaved_relations(self, field_dict): for foreign_key_field in self._meta.refs: foreign_key = foreign_key_field.name conditions = ( foreign_key in field_dict and field_dict[foreign_key] is None and self.__rel__.get(foreign_key) is not None) if conditions: setattr(self, foreign_key, getattr(self, foreign_key)) field_dict[foreign_key] = self.__data__[foreign_key] def save(self, force_insert=False, only=None): field_dict = self.__data__.copy() if self._meta.primary_key is not False: pk_field = self._meta.primary_key pk_value = self._pk else: pk_field = pk_value = None if only is not None: field_dict = self._prune_fields(field_dict, only) elif self._meta.only_save_dirty and not force_insert: field_dict = self._prune_fields(field_dict, self.dirty_fields) if not field_dict: self._dirty.clear() return False self._populate_unsaved_relations(field_dict) rows = 1 if self._meta.auto_increment and pk_value is None: field_dict.pop(pk_field.name, None) if pk_value is not None and not force_insert: if self._meta.composite_key: for pk_part_name in pk_field.field_names: field_dict.pop(pk_part_name, None) else: field_dict.pop(pk_field.name, None) if not field_dict: raise ValueError('no data to save!') rows = self.update(**field_dict).where(self._pk_expr()).execute() elif pk_field is not None: pk = self.insert(**field_dict).execute() if pk is not None and (self._meta.auto_increment or pk_value is None): self._pk = pk else: self.insert(**field_dict).execute() self._dirty.clear() return rows def is_dirty(self): return bool(self._dirty) @property def 
dirty_fields(self): return [f for f in self._meta.sorted_fields if f.name in self._dirty] def dependencies(self, search_nullable=False): model_class = type(self) stack = [(type(self), None)] seen = set() while stack: klass, query = stack.pop() if klass in seen: continue seen.add(klass) for fk, rel_model in klass._meta.backrefs.items(): if rel_model is model_class or query is None: node = (fk == self.__data__[fk.rel_field.name]) else: node = fk << query subquery = (rel_model.select(rel_model._meta.primary_key) .where(node)) if not fk.null or search_nullable: stack.append((rel_model, subquery)) yield (node, fk) def delete_instance(self, recursive=False, delete_nullable=False): if recursive: dependencies = self.dependencies(delete_nullable) for query, fk in reversed(list(dependencies)): model = fk.model if fk.null and not delete_nullable: model.update(**{fk.name: None}).where(query).execute() else: model.delete().where(query).execute() return type(self).delete().where(self._pk_expr()).execute() def __hash__(self): return hash((self.__class__, self._pk)) def __eq__(self, other): return ( other.__class__ == self.__class__ and self._pk is not None and self._pk == other._pk) def __ne__(self, other): return not self == other def __sql__(self, ctx): # NOTE: when comparing a foreign-key field whose related-field is not a # primary-key, then doing an equality test for the foreign-key with a # model instance will return the wrong value; since we would return # the primary key for a given model instance. # # This checks to see if we have a converter in the scope, and that we # are converting a foreign-key expression. If so, we hand the model # instance to the converter rather than blindly grabbing the primary- # key. In the event the provided converter fails to handle the model # instance, then we will return the primary-key. 
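        # Illustration (hypothetical, for exposition only): given
        #
        #     class Tweet(Model):
        #         user = ForeignKeyField(User, field=User.username)
        #
        # the expression (Tweet.user == user_instance) must serialize the model
        # instance via User.username rather than its primary key; the scoped
        # converter below handles that, falling back to the primary key.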
if ctx.state.converter is not None and ctx.state.is_fk_expr: try: return ctx.sql(Value(self, converter=ctx.state.converter)) except (TypeError, ValueError): pass return ctx.sql(Value(getattr(self, self._meta.primary_key.name), converter=self._meta.primary_key.db_value)) @classmethod def bind(cls, database, bind_refs=True, bind_backrefs=True, _exclude=None): is_different = cls._meta.database is not database cls._meta.set_database(database) if bind_refs or bind_backrefs: if _exclude is None: _exclude = set() G = cls._meta.model_graph(refs=bind_refs, backrefs=bind_backrefs) for _, model, is_backref in G: if model not in _exclude: model._meta.set_database(database) _exclude.add(model) return is_different @classmethod def bind_ctx(cls, database, bind_refs=True, bind_backrefs=True): return _BoundModelsContext((cls,), database, bind_refs, bind_backrefs) @classmethod def table_exists(cls): M = cls._meta return cls._schema.database.table_exists(M.table.__name__, M.schema) @classmethod def create_table(cls, safe=True, **options): if 'fail_silently' in options: __deprecated__('"fail_silently" has been deprecated in favor of ' '"safe" for the create_table() method.') safe = options.pop('fail_silently') if safe and not cls._schema.database.safe_create_index \ and cls.table_exists(): return if cls._meta.temporary: options.setdefault('temporary', cls._meta.temporary) cls._schema.create_all(safe, **options) @classmethod def drop_table(cls, safe=True, drop_sequences=True, **options): if safe and not cls._schema.database.safe_drop_index \ and not cls.table_exists(): return if cls._meta.temporary: options.setdefault('temporary', cls._meta.temporary) cls._schema.drop_all(safe, drop_sequences, **options) @classmethod def truncate_table(cls, **options): cls._schema.truncate_table(**options) @classmethod def index(cls, *fields, **kwargs): return ModelIndex(cls, fields, **kwargs) @classmethod def add_index(cls, *fields, **kwargs): if len(fields) == 1 and isinstance(fields[0], (SQL, Index)): cls._meta.indexes.append(fields[0]) else: cls._meta.indexes.append(ModelIndex(cls, fields, **kwargs)) class ModelAlias(Node): """Provide a separate reference to a model in a query.""" def __init__(self, model, alias=None): self.__dict__['model'] = model self.__dict__['alias'] = alias def __getattr__(self, attr): # Hack to work-around the fact that properties or other objects # implementing the descriptor protocol (on the model being aliased), # will not work correctly when we use getattr(). So we explicitly pass # the model alias to the descriptor's getter. try: obj = self.model.__dict__[attr] except KeyError: pass else: if isinstance(obj, ModelDescriptor): return obj.__get__(None, self) model_attr = getattr(self.model, attr) if isinstance(model_attr, Field): self.__dict__[attr] = FieldAlias.create(self, model_attr) return self.__dict__[attr] return model_attr def __setattr__(self, attr, value): raise AttributeError('Cannot set attributes on model aliases.') def get_field_aliases(self): return [getattr(self, n) for n in self.model._meta.sorted_field_names] def select(self, *selection): if not selection: selection = self.get_field_aliases() return ModelSelect(self, selection) def __call__(self, **kwargs): return self.model(**kwargs) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: # Return the quoted table name. return ctx.sql(self.model) if self.alias: ctx.alias_manager[self] = self.alias if ctx.scope == SCOPE_SOURCE: # Define the table and its alias. 
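            # Illustration (hypothetical self-join, for exposition only):
            #
            #     Parent = Category.alias()
            #     query = (Category
            #              .select(Category, Parent)
            #              .join(Parent, on=(Category.parent == Parent.id)))
            #
            # In the FROM/JOIN clause (SCOPE_SOURCE) the alias renders as
            # something like '"category" AS "t1"'; elsewhere it is referenced
            # simply by its alias.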
return (ctx .sql(self.model._meta.entity) .literal(' AS ') .sql(Entity(ctx.alias_manager[self]))) else: # Refer to the table using the alias. return ctx.sql(Entity(ctx.alias_manager[self])) class FieldAlias(Field): def __init__(self, source, field): self.source = source self.model = source.model self.field = field @classmethod def create(cls, source, field): class _FieldAlias(cls, type(field)): pass return _FieldAlias(source, field) def clone(self): return FieldAlias(self.source, self.field) def adapt(self, value): return self.field.adapt(value) def python_value(self, value): return self.field.python_value(value) def db_value(self, value): return self.field.db_value(value) def __getattr__(self, attr): return self.source if attr == 'model' else getattr(self.field, attr) def __sql__(self, ctx): return ctx.sql(Column(self.source, self.field.column_name)) def sort_models(models): models = set(models) seen = set() ordering = [] def dfs(model): if model in models and model not in seen: seen.add(model) for foreign_key, rel_model in model._meta.refs.items(): # Do not depth-first search deferred foreign-keys as this can # cause tables to be created in the incorrect order. if not foreign_key.deferred: dfs(rel_model) if model._meta.depends_on: for dependency in model._meta.depends_on: dfs(dependency) ordering.append(model) names = lambda m: (m._meta.name, m._meta.table_name) for m in sorted(models, key=names): dfs(m) return ordering class _ModelQueryHelper(object): default_row_type = ROW.MODEL def __init__(self, *args, **kwargs): super(_ModelQueryHelper, self).__init__(*args, **kwargs) if not self._database: self._database = self.model._meta.database @Node.copy def objects(self, constructor=None): self._row_type = ROW.CONSTRUCTOR self._constructor = self.model if constructor is None else constructor def _get_cursor_wrapper(self, cursor): row_type = self._row_type or self.default_row_type if row_type == ROW.MODEL: return self._get_model_cursor_wrapper(cursor) elif row_type == ROW.DICT: return ModelDictCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.TUPLE: return ModelTupleCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.NAMED_TUPLE: return ModelNamedTupleCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.CONSTRUCTOR: return ModelObjectCursorWrapper(cursor, self.model, self._returning, self._constructor) else: raise ValueError('Unrecognized row type: "%s".' 
% row_type) def _get_model_cursor_wrapper(self, cursor): return ModelObjectCursorWrapper(cursor, self.model, [], self.model) class ModelRaw(_ModelQueryHelper, RawQuery): def __init__(self, model, sql, params, **kwargs): self.model = model self._returning = () super(ModelRaw, self).__init__(sql=sql, params=params, **kwargs) def get(self): try: return self.execute()[0] except IndexError: sql, params = self.sql() raise self.model.DoesNotExist('%s instance matching query does ' 'not exist:\nSQL: %s\nParams: %s' % (self.model, sql, params)) class BaseModelSelect(_ModelQueryHelper): def union_all(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs) __add__ = union_all def union(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs) __or__ = union def intersect(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs) __and__ = intersect def except_(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs) __sub__ = except_ def __iter__(self): if not self._cursor_wrapper: self.execute() return iter(self._cursor_wrapper) def prefetch(self, *subqueries): return prefetch(self, *subqueries) def get(self, database=None): clone = self.paginate(1, 1) clone._cursor_wrapper = None try: return clone.execute(database)[0] except IndexError: sql, params = clone.sql() raise self.model.DoesNotExist('%s instance matching query does ' 'not exist:\nSQL: %s\nParams: %s' % (clone.model, sql, params)) def get_or_none(self, database=None): try: return self.get(database=database) except self.model.DoesNotExist: pass @Node.copy def group_by(self, *columns): grouping = [] for column in columns: if is_model(column): grouping.extend(column._meta.sorted_fields) elif isinstance(column, Table): if not column._columns: raise ValueError('Cannot pass a table to group_by() that ' 'does not have columns explicitly ' 'declared.') grouping.extend([getattr(column, col_name) for col_name in column._columns]) else: grouping.append(column) self._group_by = grouping class ModelCompoundSelectQuery(BaseModelSelect, CompoundSelectQuery): def __init__(self, model, *args, **kwargs): self.model = model super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs) def _get_model_cursor_wrapper(self, cursor): return self.lhs._get_model_cursor_wrapper(cursor) def _normalize_model_select(fields_or_models): fields = [] for fm in fields_or_models: if is_model(fm): fields.extend(fm._meta.sorted_fields) elif isinstance(fm, ModelAlias): fields.extend(fm.get_field_aliases()) elif isinstance(fm, Table) and fm._columns: fields.extend([getattr(fm, col) for col in fm._columns]) else: fields.append(fm) return fields class ModelSelect(BaseModelSelect, Select): def __init__(self, model, fields_or_models, is_default=False): self.model = self._join_ctx = model self._joins = {} self._is_default = is_default fields = _normalize_model_select(fields_or_models) super(ModelSelect, self).__init__([model], fields) def clone(self): clone = super(ModelSelect, self).clone() if clone._joins: clone._joins = dict(clone._joins) return clone def select(self, *fields_or_models): if fields_or_models or not self._is_default: self._is_default = False fields = _normalize_model_select(fields_or_models) return super(ModelSelect, self).select(*fields) return self def switch(self, ctx=None): self._join_ctx = self.model if ctx is None else ctx return self def _get_model(self, src): if is_model(src): return src, True elif isinstance(src, Table) and src._model: return src._model, False elif 
isinstance(src, ModelAlias): return src.model, False elif isinstance(src, ModelSelect): return src.model, False return None, False def _normalize_join(self, src, dest, on, attr): # Allow "on" expression to have an alias that determines the # destination attribute for the joined data. on_alias = isinstance(on, Alias) if on_alias: attr = attr or on._alias on = on.alias() # Obtain references to the source and destination models being joined. src_model, src_is_model = self._get_model(src) dest_model, dest_is_model = self._get_model(dest) if src_model and dest_model: self._join_ctx = dest constructor = dest_model # In the case where the "on" clause is a Column or Field, we will # convert that field into the appropriate predicate expression. if not (src_is_model and dest_is_model) and isinstance(on, Column): if on.source is src: to_field = src_model._meta.columns[on.name] elif on.source is dest: to_field = dest_model._meta.columns[on.name] else: raise AttributeError('"on" clause Column %s does not ' 'belong to %s or %s.' % (on, src_model, dest_model)) on = None elif isinstance(on, Field): to_field = on on = None else: to_field = None fk_field, is_backref = self._generate_on_clause( src_model, dest_model, to_field, on) if on is None: src_attr = 'name' if src_is_model else 'column_name' dest_attr = 'name' if dest_is_model else 'column_name' if is_backref: lhs = getattr(dest, getattr(fk_field, dest_attr)) rhs = getattr(src, getattr(fk_field.rel_field, src_attr)) else: lhs = getattr(src, getattr(fk_field, src_attr)) rhs = getattr(dest, getattr(fk_field.rel_field, dest_attr)) on = (lhs == rhs) if not attr: if fk_field is not None and not is_backref: attr = fk_field.name else: attr = dest_model._meta.name elif on_alias and fk_field is not None and \ attr == fk_field.object_id_name and not is_backref: raise ValueError('Cannot assign join alias to "%s", as this ' 'attribute is the object_id_name for the ' 'foreign-key field "%s"' % (attr, fk_field)) elif isinstance(dest, Source): constructor = dict attr = attr or dest._alias if not attr and isinstance(dest, Table): attr = attr or dest.__name__ return (on, attr, constructor) def _generate_on_clause(self, src, dest, to_field=None, on=None): meta = src._meta is_backref = fk_fields = False # Get all the foreign keys between source and dest, and determine if # the join is via a back-reference. if dest in meta.model_refs: fk_fields = meta.model_refs[dest] elif dest in meta.model_backrefs: fk_fields = meta.model_backrefs[dest] is_backref = True if not fk_fields: if on is not None: return None, False raise ValueError('Unable to find foreign key between %s and %s. ' 'Please specify an explicit join condition.' % (src, dest)) elif to_field is not None: # If the foreign-key field was specified explicitly, remove all # other foreign-key fields from the list. target = (to_field.field if isinstance(to_field, FieldAlias) else to_field) fk_fields = [f for f in fk_fields if ( (f is target) or (is_backref and f.rel_field is to_field))] if len(fk_fields) == 1: return fk_fields[0], is_backref if on is None: # If multiple foreign-keys exist, try using the FK whose name # matches that of the related model. If not, raise an error as this # is ambiguous. for fk in fk_fields: if fk.name == dest._meta.name: return fk, is_backref raise ValueError('More than one foreign key between %s and %s.' ' Please specify which you are joining on.' 
% (src, dest)) # If there are multiple foreign-keys to choose from and the join # predicate is an expression, we'll try to figure out which # foreign-key field we're joining on so that we can assign to the # correct attribute when resolving the model graph. to_field = None if isinstance(on, Expression): lhs, rhs = on.lhs, on.rhs # Coerce to set() so that we force Python to compare using the # object's hash rather than equality test, which returns a # false-positive due to overriding __eq__. fk_set = set(fk_fields) if isinstance(lhs, Field): lhs_f = lhs.field if isinstance(lhs, FieldAlias) else lhs if lhs_f in fk_set: to_field = lhs_f elif isinstance(rhs, Field): rhs_f = rhs.field if isinstance(rhs, FieldAlias) else rhs if rhs_f in fk_set: to_field = rhs_f return to_field, False @Node.copy def join(self, dest, join_type=JOIN.INNER, on=None, src=None, attr=None): src = self._join_ctx if src is None else src if join_type == JOIN.LATERAL or join_type == JOIN.LEFT_LATERAL: on = True elif join_type != JOIN.CROSS: on, attr, constructor = self._normalize_join(src, dest, on, attr) if attr: self._joins.setdefault(src, []) self._joins[src].append((dest, attr, constructor, join_type)) elif on is not None: raise ValueError('Cannot specify on clause with cross join.') if not self._from_list: raise ValueError('No sources to join on.') item = self._from_list.pop() self._from_list.append(Join(item, dest, join_type, on)) def join_from(self, src, dest, join_type=JOIN.INNER, on=None, attr=None): return self.join(dest, join_type, on, src, attr) def _get_model_cursor_wrapper(self, cursor): if len(self._from_list) == 1 and not self._joins: return ModelObjectCursorWrapper(cursor, self.model, self._returning, self.model) return ModelCursorWrapper(cursor, self.model, self._returning, self._from_list, self._joins) def ensure_join(self, lm, rm, on=None, **join_kwargs): join_ctx = self._join_ctx for dest, _, constructor, _ in self._joins.get(lm, []): if dest == rm: return self return self.switch(lm).join(rm, on=on, **join_kwargs).switch(join_ctx) def convert_dict_to_node(self, qdict): accum = [] joins = [] fks = (ForeignKeyField, BackrefAccessor) for key, value in sorted(qdict.items()): curr = self.model if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP: key, op = key.rsplit('__', 1) op = DJANGO_MAP[op] elif value is None: op = DJANGO_MAP['is'] else: op = DJANGO_MAP['eq'] if '__' not in key: # Handle simplest case. This avoids joining over-eagerly when a # direct FK lookup is all that is required. model_attr = getattr(curr, key) else: for piece in key.split('__'): for dest, attr, _, _ in self._joins.get(curr, ()): if attr == piece or (isinstance(dest, ModelAlias) and dest.alias == piece): curr = dest break else: model_attr = getattr(curr, piece) if value is not None and isinstance(model_attr, fks): curr = model_attr.rel_model joins.append(model_attr) accum.append(op(model_attr, value)) return accum, joins def filter(self, *args, **kwargs): # normalize args and kwargs into a new expression if args and kwargs: dq_node = (reduce(operator.and_, [a.clone() for a in args]) & DQ(**kwargs)) elif args: dq_node = (reduce(operator.and_, [a.clone() for a in args]) & ColumnBase()) elif kwargs: dq_node = DQ(**kwargs) & ColumnBase() else: return self.clone() # dq_node should now be an Expression, lhs = Node(), rhs = ... 
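        # Illustration (hypothetical models, for exposition only):
        #
        #     Tweet.select().filter(user__username='alice',
        #                           message__ilike='%peewee%')
        #
        # Each double-underscore lookup is resolved by convert_dict_to_node()
        # into a join (Tweet -> User) plus an expression, which the loop below
        # grafts back onto the query tree.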
q = collections.deque([dq_node]) dq_joins = [] seen_joins = set() while q: curr = q.popleft() if not isinstance(curr, Expression): continue for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)): if isinstance(piece, DQ): query, joins = self.convert_dict_to_node(piece.query) for join in joins: if join not in seen_joins: dq_joins.append(join) seen_joins.add(join) expression = reduce(operator.and_, query) # Apply values from the DQ object. if piece._negated: expression = Negated(expression) #expression._alias = piece._alias setattr(curr, side, expression) else: q.append(piece) if not args or not kwargs: dq_node = dq_node.lhs query = self.clone() for field in dq_joins: if isinstance(field, ForeignKeyField): lm, rm = field.model, field.rel_model field_obj = field elif isinstance(field, BackrefAccessor): lm, rm = field.model, field.rel_model field_obj = field.field query = query.ensure_join(lm, rm, field_obj) return query.where(dq_node) def create_table(self, name, safe=True, **meta): return self.model._schema.create_table_as(name, self, safe, **meta) def __sql_selection__(self, ctx, is_subquery=False): if self._is_default and is_subquery and len(self._returning) > 1 and \ self.model._meta.primary_key is not False: return ctx.sql(self.model._meta.primary_key) return ctx.sql(CommaNodeList(self._returning)) class NoopModelSelect(ModelSelect): def __sql__(self, ctx): return self.model._meta.database.get_noop_select(ctx) def _get_cursor_wrapper(self, cursor): return CursorWrapper(cursor) class _ModelWriteQueryHelper(_ModelQueryHelper): def __init__(self, model, *args, **kwargs): self.model = model super(_ModelWriteQueryHelper, self).__init__(model, *args, **kwargs) def returning(self, *returning): accum = [] for item in returning: if is_model(item): accum.extend(item._meta.sorted_fields) else: accum.append(item) return super(_ModelWriteQueryHelper, self).returning(*accum) def _set_table_alias(self, ctx): table = self.model._meta.table ctx.alias_manager[table] = table.__name__ class ModelUpdate(_ModelWriteQueryHelper, Update): pass class ModelInsert(_ModelWriteQueryHelper, Insert): default_row_type = ROW.TUPLE def __init__(self, *args, **kwargs): super(ModelInsert, self).__init__(*args, **kwargs) if self._returning is None and self.model._meta.database is not None: if self.model._meta.database.returning_clause: self._returning = self.model._meta.get_primary_keys() def returning(self, *returning): # By default ModelInsert will yield a `tuple` containing the # primary-key of the newly inserted row. But if we are explicitly # specifying a returning clause and have not set a row type, we will # default to returning model instances instead. 
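        # Illustration (hypothetical, assumes a backend with RETURNING support,
        # e.g. Postgres):
        #
        #     iq = User.insert_many(rows).returning(User)
        #     for user in iq.execute():       # iterates User instances
        #         ...
        #
        # Without an explicit returning() call, the same insert iterates as
        # tuples of primary-key values.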
if returning and self._row_type is None: self._row_type = ROW.MODEL return super(ModelInsert, self).returning(*returning) def get_default_data(self): return self.model._meta.defaults def get_default_columns(self): fields = self.model._meta.sorted_fields return fields[1:] if self.model._meta.auto_increment else fields class ModelDelete(_ModelWriteQueryHelper, Delete): pass class ManyToManyQuery(ModelSelect): def __init__(self, instance, accessor, rel, *args, **kwargs): self._instance = instance self._accessor = accessor self._src_attr = accessor.src_fk.rel_field.name self._dest_attr = accessor.dest_fk.rel_field.name super(ManyToManyQuery, self).__init__(rel, (rel,), *args, **kwargs) def _id_list(self, model_or_id_list): if isinstance(model_or_id_list[0], Model): return [getattr(obj, self._dest_attr) for obj in model_or_id_list] return model_or_id_list def add(self, value, clear_existing=False): if clear_existing: self.clear() accessor = self._accessor src_id = getattr(self._instance, self._src_attr) if isinstance(value, SelectQuery): query = value.columns( Value(src_id), accessor.dest_fk.rel_field) accessor.through_model.insert_from( fields=[accessor.src_fk, accessor.dest_fk], query=query).execute() else: value = ensure_tuple(value) if not value: return inserts = [{ accessor.src_fk.name: src_id, accessor.dest_fk.name: rel_id} for rel_id in self._id_list(value)] accessor.through_model.insert_many(inserts).execute() def remove(self, value): src_id = getattr(self._instance, self._src_attr) if isinstance(value, SelectQuery): column = getattr(value.model, self._dest_attr) subquery = value.columns(column) return (self._accessor.through_model .delete() .where( (self._accessor.dest_fk << subquery) & (self._accessor.src_fk == src_id)) .execute()) else: value = ensure_tuple(value) if not value: return return (self._accessor.through_model .delete() .where( (self._accessor.dest_fk << self._id_list(value)) & (self._accessor.src_fk == src_id)) .execute()) def clear(self): src_id = getattr(self._instance, self._src_attr) return (self._accessor.through_model .delete() .where(self._accessor.src_fk == src_id) .execute()) def safe_python_value(conv_func): def validate(value): try: return conv_func(value) except (TypeError, ValueError): return value return validate class BaseModelCursorWrapper(DictCursorWrapper): def __init__(self, cursor, model, columns): super(BaseModelCursorWrapper, self).__init__(cursor) self.model = model self.select = columns or [] def _initialize_columns(self): combined = self.model._meta.combined table = self.model._meta.table description = self.cursor.description self.ncols = len(self.cursor.description) self.columns = [] self.converters = converters = [None] * self.ncols self.fields = fields = [None] * self.ncols for idx, description_item in enumerate(description): column = description_item[0] dot_index = column.find('.') if dot_index != -1: column = column[dot_index + 1:] column = column.strip('")') self.columns.append(column) try: raw_node = self.select[idx] except IndexError: if column in combined: raw_node = node = combined[column] else: continue else: node = raw_node.unwrap() # Heuristics used to attempt to get the field associated with a # given SELECT column, so that we can accurately convert the value # returned by the database-cursor into a Python object. 
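            # Illustration (for exposition only): selecting a bare field keeps
            # that field's python_value converter, whereas something like
            # fn.MAX(Tweet.timestamp) is a Function whose first argument is a
            # field; its converter is wrapped in safe_python_value() below so
            # a failed conversion falls back to the raw cursor value.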
if isinstance(node, Field): if raw_node._coerce: converters[idx] = node.python_value fields[idx] = node if not raw_node.is_alias(): self.columns[idx] = node.name elif isinstance(node, ColumnBase) and raw_node._converter: converters[idx] = raw_node._converter elif isinstance(node, Function) and node._coerce: if node._python_value is not None: converters[idx] = node._python_value elif node.arguments and isinstance(node.arguments[0], Node): # If the first argument is a field or references a column # on a Model, try using that field's conversion function. # This usually works, but we use "safe_python_value()" so # that if a TypeError or ValueError occurs during # conversion we can just fall-back to the raw cursor value. first = node.arguments[0].unwrap() if isinstance(first, Entity): path = first._path[-1] # Try to look-up by name. first = combined.get(path) if isinstance(first, Field): converters[idx] = safe_python_value(first.python_value) elif column in combined: if node._coerce: converters[idx] = combined[column].python_value if isinstance(node, Column) and node.source == table: fields[idx] = combined[column] initialize = _initialize_columns def process_row(self, row): raise NotImplementedError class ModelDictCursorWrapper(BaseModelCursorWrapper): def process_row(self, row): result = {} columns, converters = self.columns, self.converters fields = self.fields for i in range(self.ncols): attr = columns[i] if attr in result: continue # Don't overwrite if we have dupes. if converters[i] is not None: result[attr] = converters[i](row[i]) else: result[attr] = row[i] return result class ModelTupleCursorWrapper(ModelDictCursorWrapper): constructor = tuple def process_row(self, row): columns, converters = self.columns, self.converters return self.constructor([ (converters[i](row[i]) if converters[i] is not None else row[i]) for i in range(self.ncols)]) class ModelNamedTupleCursorWrapper(ModelTupleCursorWrapper): def initialize(self): self._initialize_columns() attributes = [] for i in range(self.ncols): attributes.append(self.columns[i]) self.tuple_class = collections.namedtuple('Row', attributes) self.constructor = lambda row: self.tuple_class(*row) class ModelObjectCursorWrapper(ModelDictCursorWrapper): def __init__(self, cursor, model, select, constructor): self.constructor = constructor self.is_model = is_model(constructor) super(ModelObjectCursorWrapper, self).__init__(cursor, model, select) def process_row(self, row): data = super(ModelObjectCursorWrapper, self).process_row(row) if self.is_model: # Clear out any dirty fields before returning to the user. 
obj = self.constructor(__no_default__=1, **data) obj._dirty.clear() return obj else: return self.constructor(**data) class ModelCursorWrapper(BaseModelCursorWrapper): def __init__(self, cursor, model, select, from_list, joins): super(ModelCursorWrapper, self).__init__(cursor, model, select) self.from_list = from_list self.joins = joins def initialize(self): self._initialize_columns() selected_src = set([field.model for field in self.fields if field is not None]) select, columns = self.select, self.columns self.key_to_constructor = {self.model: self.model} self.src_is_dest = {} self.src_to_dest = [] accum = collections.deque(self.from_list) dests = set() while accum: curr = accum.popleft() if isinstance(curr, Join): accum.append(curr.lhs) accum.append(curr.rhs) continue if curr not in self.joins: continue is_dict = isinstance(curr, dict) for key, attr, constructor, join_type in self.joins[curr]: if key not in self.key_to_constructor: self.key_to_constructor[key] = constructor # (src, attr, dest, is_dict, join_type). self.src_to_dest.append((curr, attr, key, is_dict, join_type)) dests.add(key) accum.append(key) # Ensure that we accommodate everything selected. for src in selected_src: if src not in self.key_to_constructor: if is_model(src): self.key_to_constructor[src] = src elif isinstance(src, ModelAlias): self.key_to_constructor[src] = src.model # Indicate which sources are also dests. for src, _, dest, _, _ in self.src_to_dest: self.src_is_dest[src] = src in dests and (dest in selected_src or src in selected_src) self.column_keys = [] for idx, node in enumerate(select): key = self.model field = self.fields[idx] if field is not None: if isinstance(field, FieldAlias): key = field.source else: key = field.model else: if isinstance(node, Node): node = node.unwrap() if isinstance(node, Column): key = node.source self.column_keys.append(key) def process_row(self, row): objects = {} object_list = [] for key, constructor in self.key_to_constructor.items(): objects[key] = constructor(__no_default__=True) object_list.append(objects[key]) default_instance = objects[self.model] set_keys = set() for idx, key in enumerate(self.column_keys): # Get the instance corresponding to the selected column/value, # falling back to the "root" model instance. instance = objects.get(key, default_instance) column = self.columns[idx] value = row[idx] if value is not None: set_keys.add(key) if self.converters[idx]: value = self.converters[idx](value) if isinstance(instance, dict): instance[column] = value else: setattr(instance, column, value) # Need to do some analysis on the joins before this. for (src, attr, dest, is_dict, join_type) in self.src_to_dest: instance = objects[src] try: joined_instance = objects[dest] except KeyError: continue # If no fields were set on the destination instance then do not # assign an "empty" instance. if instance is None or dest is None or \ (dest not in set_keys and not self.src_is_dest.get(dest)): continue # If no fields were set on either the source or the destination, # then we have nothing to do here. if instance not in set_keys and dest not in set_keys \ and join_type.endswith('OUTER JOIN'): continue if is_dict: instance[attr] = joined_instance else: setattr(instance, attr, joined_instance) # When instantiating models from a cursor, we clear the dirty fields. 
for instance in object_list: if isinstance(instance, Model): instance._dirty.clear() return objects[self.model] class PrefetchQuery(collections.namedtuple('_PrefetchQuery', ( 'query', 'fields', 'is_backref', 'rel_models', 'field_to_name', 'model'))): def __new__(cls, query, fields=None, is_backref=None, rel_models=None, field_to_name=None, model=None): if fields: if is_backref: if rel_models is None: rel_models = [field.model for field in fields] foreign_key_attrs = [field.rel_field.name for field in fields] else: if rel_models is None: rel_models = [field.rel_model for field in fields] foreign_key_attrs = [field.name for field in fields] field_to_name = list(zip(fields, foreign_key_attrs)) model = query.model return super(PrefetchQuery, cls).__new__( cls, query, fields, is_backref, rel_models, field_to_name, model) def populate_instance(self, instance, id_map): if self.is_backref: for field in self.fields: identifier = instance.__data__[field.name] key = (field, identifier) if key in id_map: setattr(instance, field.name, id_map[key]) else: for field, attname in self.field_to_name: identifier = instance.__data__[field.rel_field.name] key = (field, identifier) rel_instances = id_map.get(key, []) for inst in rel_instances: setattr(inst, attname, instance) inst._dirty.clear() setattr(instance, field.backref, rel_instances) def store_instance(self, instance, id_map): for field, attname in self.field_to_name: identity = field.rel_field.python_value(instance.__data__[attname]) key = (field, identity) if self.is_backref: id_map[key] = instance else: id_map.setdefault(key, []) id_map[key].append(instance) def prefetch_add_subquery(sq, subqueries): fixed_queries = [PrefetchQuery(sq)] for i, subquery in enumerate(subqueries): if isinstance(subquery, tuple): subquery, target_model = subquery else: target_model = None if not isinstance(subquery, Query) and is_model(subquery) or \ isinstance(subquery, ModelAlias): subquery = subquery.select() subquery_model = subquery.model fks = backrefs = None for j in reversed(range(i + 1)): fixed = fixed_queries[j] last_query = fixed.query last_model = last_obj = fixed.model if isinstance(last_model, ModelAlias): last_model = last_model.model rels = subquery_model._meta.model_refs.get(last_model, []) if rels: fks = [getattr(subquery_model, fk.name) for fk in rels] pks = [getattr(last_obj, fk.rel_field.name) for fk in rels] else: backrefs = subquery_model._meta.model_backrefs.get(last_model) if (fks or backrefs) and ((target_model is last_obj) or (target_model is None)): break if not fks and not backrefs: tgt_err = ' using %s' % target_model if target_model else '' raise AttributeError('Error: unable to find foreign key for ' 'query: %s%s' % (subquery, tgt_err)) dest = (target_model,) if target_model else None if fks: expr = reduce(operator.or_, [ (fk << last_query.select(pk)) for (fk, pk) in zip(fks, pks)]) subquery = subquery.where(expr) fixed_queries.append(PrefetchQuery(subquery, fks, False, dest)) elif backrefs: expressions = [] for backref in backrefs: rel_field = getattr(subquery_model, backref.rel_field.name) fk_field = getattr(last_obj, backref.name) expressions.append(rel_field << last_query.select(fk_field)) subquery = subquery.where(reduce(operator.or_, expressions)) fixed_queries.append(PrefetchQuery(subquery, backrefs, True, dest)) return fixed_queries def prefetch(sq, *subqueries): if not subqueries: return sq fixed_queries = prefetch_add_subquery(sq, subqueries) deps = {} rel_map = {} for pq in reversed(fixed_queries): query_model = pq.model if 
pq.fields: for rel_model in pq.rel_models: rel_map.setdefault(rel_model, []) rel_map[rel_model].append(pq) deps.setdefault(query_model, {}) id_map = deps[query_model] has_relations = bool(rel_map.get(query_model)) for instance in pq.query: if pq.fields: pq.store_instance(instance, id_map) if has_relations: for rel in rel_map[query_model]: rel.populate_instance(instance, deps[rel.model]) return list(pq.query)
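# --- Usage sketch (illustrative, not from the original module) ---
# A minimal example of the prefetch() helper defined above. It assumes this
# module is the single-file peewee ORM, so Model, CharField, ForeignKeyField
# and SqliteDatabase are defined earlier in the same file; the model names
# below are invented for illustration.
if __name__ == '__main__':
    _demo_db = SqliteDatabase(':memory:')

    class _Author(Model):
        name = CharField()

        class Meta:
            database = _demo_db

    class _Book(Model):
        author = ForeignKeyField(_Author, backref='books')
        title = CharField()

        class Meta:
            database = _demo_db

    _demo_db.create_tables([_Author, _Book])
    _ann = _Author.create(name='Ann')
    _Book.create(author=_ann, title='First')
    _Book.create(author=_ann, title='Second')

    # prefetch() runs one query per model and stitches the rows together, so
    # reading author.books below does not issue an extra query per author.
    for _author in prefetch(_Author.select(), _Book.select()):
        print(_author.name, [b.title for b in _author.books])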
the-stack_0_8135
import logging, sys, time


class Logger:
    def __init__(self):
        self.activatedLogger = False

    def animation(self, string=None):
        # Print an optional message followed by three animated dots.
        if string:
            sys.stdout.write(string)
            sys.stdout.flush()
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(0.8)
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(0.8)
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(1)
        print("\n")

    def activateLogger(self):
        self.activatedLogger = True
        return self

    def logprint(self, content, animated=False, clog=True):
        # Console output: animated if requested, otherwise a plain print.
        if animated and clog:
            self.animation(content)
        elif clog:
            print(content)
        # File output only when the logger has been activated.
        if self.activatedLogger:
            logging.info(content)


try:
    logging.basicConfig(format='%(message)s', filename='logs/datafarm.log', level=logging.INFO)
    globalLogger = Logger().activateLogger().logprint
except FileNotFoundError:
    print("No `logs` folder found. No logs will be stored...")
    time.sleep(3)
    globalLogger = Logger().logprint
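# --- Usage sketch (illustrative, not from the original module) ---
# `globalLogger` is the bound logprint() created above. With animated=True the
# message is printed with the three-dot animation; every message is also
# written to logs/datafarm.log when that folder exists and the file logger was
# activated.
if __name__ == "__main__":
    globalLogger("Loading dataset", animated=True)
    globalLogger("Done")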
the-stack_0_8136
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging


def create_logger(name, log_file=None):
    """ use different log level for file and stream """
    l = logging.getLogger(name)
    formatter = logging.Formatter('[%(asctime)s] %(message)s')
    l.setLevel(logging.DEBUG)

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(logging.INFO)
    l.addHandler(sh)

    if log_file is not None:
        fh = logging.FileHandler(log_file)
        fh.setFormatter(formatter)
        fh.setLevel(logging.DEBUG)
        l.addHandler(fh)

    return l


if __name__ == '__main__':
    logger = create_logger('test')
    logger = create_logger('test', 'log.txt')
    logger.info('output to file and stream')
    logger.debug('output to file')
the-stack_0_8138
from boggle import Boggle from flask import Flask, render_template, session, jsonify, request # from flask_debugtoolbar import DebugToolbarExtension app = Flask(__name__) app.config["SECRET_KEY"] = "boggleSecretKey99" # debug = DebugToolbarExtension(app) boggle_game = Boggle() @app.route('/') def landing_page(): """Displays the homepage""" return render_template('home.html', css='home.css') @app.route('/game') def game_board(): """Handles displaying the game itself""" board = boggle_game.make_board() session['board'] = board games = session.get('games', 0) high_score = session.get('high-score', 0) return render_template('game_board.html', css='game_board.css', games=games, high_score=high_score) @app.route('/rules-gameplay') def rules_gameplay_page(): """Handles the rules and game play page""" return render_template('rules.html', css='rules.css') @app.route('/game/word-guess') def check_word(): """Checks if the word submitted exists in the words file""" word = request.args['word'] res = {"result": boggle_game.check_valid_word(session['board'], word)} return jsonify(res) @app.route('/game/update', methods=["POST"]) def update_scores(): """Handles updating the games played, and checking/updating of the high score""" games = session.get('games', 0) high_score = session.get('high-score', 0) score = request.json['score'] session['games'] = games + 1 session['high-score'] = max(score, high_score) return jsonify(new_record=score > high_score)
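# --- Test-client sketch (illustrative, not from the original app) ---
# Exercises the routes above with Flask's built-in test client. It assumes the
# `boggle` module and the referenced templates are available; the guessed word
# and score below are invented for illustration.
if __name__ == "__main__":
    with app.test_client() as client:
        assert client.get('/').status_code == 200
        # Visiting /game stores a fresh board in the session ...
        assert client.get('/game').status_code == 200
        # ... so a guess can be checked against that board afterwards.
        res = client.get('/game/word-guess', query_string={'word': 'cat'})
        print(res.get_json())  # {"result": ...} as returned by Boggle.check_valid_word
        # Report a finished game; the answer says whether a new high score was set.
        res = client.post('/game/update', json={'score': 7})
        print(res.get_json())  # {"new_record": true} for the first game played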
the-stack_0_8139
# -*- coding: utf-8 -*- """ @author: Miguel Ángel López Robles """ #from PyDBOD import loop import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc #from PyDBOD.ldof import LDOF import sys sys.path.append("..") from ldof import LDOF from load import load_data ######################## ### test with data generated ################## np.random.seed(42) # Generate train data X_inliers = 0.3 * np.random.randn(100, 2) X_inliers = np.r_[X_inliers + 2, X_inliers - 2] # Generate some outliers X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) X = np.r_[X_inliers, X_outliers] n_outliers = len(X_outliers) ground_truth = np.ones(len(X), dtype=int) ground_truth[-n_outliers:] = -1 # use my class ldof = LDOF() coef = ldof.fit_predict(X) #print(coef) y = np.zeros(200,dtype=np.int) y_outlier = np.ones(20,dtype=np.int) y = np.append(y, y_outlier) color = np.array(['k','b']) plt.title("Local Distance-based Outlier Factor (LDOF)") plt.scatter(X[:, 0], X[:, 1], color=color[y], s=3., label='Data points') # plot circles with radius proportional to the outlier scores radius = (coef - coef.min()) / (coef.max() - coef.min()) plt.scatter(X[:, 0], X[:, 1], s=500 * coef, edgecolors='r', facecolors='none', label='Outlier scores') plt.axis('tight') plt.xlim((-5, 5)) plt.ylim((-5, 5)) #plt.xlabel("prediction errors: %d" % (n_errors)) legend = plt.legend(loc='upper left') legend.legendHandles[0]._sizes = [10] legend.legendHandles[1]._sizes = [20] plt.show() y = np.zeros(200) y_outlier = np.ones(20) y = np.append(y, y_outlier) fpr, tpr, _ = roc_curve(y,coef) roc_auc = auc(fpr, tpr) print(roc_auc) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('LDOF') plt.legend(loc="lower right") plt.show() import os os.chdir("..") ############################### ## load a file ############# data = load_data("./data/shuttle-c0-vs-c4.dat") # k = 20 #data = load_data("./data/glass5.dat", sep = ', ') #k=19 #data = load_data("./data/ecoli-0-1-3-7_vs_2-6.dat") #k=25 #data = load_data("./data/yeast5.dat", sep = ', ') #65,65 ldof = LDOF(k=240) coef = ldof.fit_predict(data[:,:-1]) coef_n = (coef - coef.min()) / (coef.max() - coef.min()) #print(coef) #print(coef_n) fpr, tpr, _ = roc_curve(data[:,-1],coef_n) roc_auc = auc(fpr, tpr) print(roc_auc) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('LDOF') plt.legend(loc="lower right") plt.show()
the-stack_0_8140
""" The CharacteristicsHandler will receive a file path, read out its characteristics as needed and return a dictionary with them. More functions can will be added in the future. Tip for usage: import characteristicshandler.CharacteristicsHandler as chan chars = chan.handle_file_path("/path/to/file.hi") """ import os from datetime import datetime class CharacteristicsHandler: """ Class to read out file properties. """ @staticmethod def handle_file_path(file_path: str): # (str) -> Dict[str, str] """ Function to receive a file string and return its characteristics as a dictionary. To be treated as a class, maybe will become one in the future. """ chars = { "name": '', "extension": '', "orig_name": '', "entry_date": '', "keywords": '', # e.g. "word1, word2, word3" to be used with "in" "read_last": '', "updated_last": ''} file_name = file_path.split(os.sep)[-1] chars['orig_name'] = chars['name'] = file_name chars['read_last'] = chars['updated_last'] = chars['entry_date'] = str( datetime.now()) split_name = file_name.split('.') if len(split_name) > 1: chars['extension'] = split_name[-1] return chars if __name__ == '__main__': os.system('touch test test.txt test.testing.tested.txt') chan = CharacteristicsHandler() print(chan.handle_file_path('test')) print(chan.handle_file_path('test.txt')) print(chan.handle_file_path('test.testing.tested.txt')) os.system('rm test test.txt test.testing.tested.txt')
the-stack_0_8141
from copy import deepcopy from typing import Union, Dict, Any, List from checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes from checkov.common.graph.graph_builder.utils import calculate_hash, join_trimmed_strings from checkov.common.graph.graph_builder.variable_rendering.breadcrumb_metadata import BreadcrumbMetadata class Block: def __init__( self, name: str, config: Dict[str, Any], path: str, block_type: str, attributes: Dict[str, Any], id: str = "", source: str = "", ) -> None: """ :param name: unique name given to the block, for example :param config: the section in tf_definitions that belong to this block :param path: the file location of the block :param block_type: str :param attributes: dictionary of the block's original attributes in the origin file """ self.name = name self.config = deepcopy(config) self.path = path self.block_type = block_type self.attributes = attributes self.id = id self.source = source self.changed_attributes: Dict[str, List[Any]] = {} self.breadcrumbs: Dict[str, List[Dict[str, Any]]] = {} attributes_to_add = self._extract_inner_attributes() self.attributes.update(attributes_to_add) def _extract_inner_attributes(self) -> Dict[str, Any]: attributes_to_add = {} for attribute_key in self.attributes: attribute_value = self.attributes[attribute_key] if isinstance(attribute_value, dict) or (isinstance(attribute_value, list) and len(attribute_value) > 0 and isinstance(attribute_value[0], dict)): inner_attributes = get_inner_attributes(attribute_key, attribute_value) attributes_to_add.update(inner_attributes) return attributes_to_add def __str__(self) -> str: return f"{self.block_type}: {self.name}" def get_attribute_dict(self) -> Dict[str, Any]: """ :return: map of all the block's native attributes (from the source file), combined with the attributes generated by the module builder. If the attributes are not a primitive type, they are converted to strings. """ base_attributes = self.get_base_attributes() self.get_origin_attributes(base_attributes) if self.changed_attributes: # add changed attributes only for calculating the hash base_attributes["changed_attributes"] = sorted(self.changed_attributes.keys()) if self.breadcrumbs: sorted_breadcrumbs = dict(sorted(self.breadcrumbs.items())) base_attributes[CustomAttributes.RENDERING_BREADCRUMBS] = sorted_breadcrumbs base_attributes[CustomAttributes.HASH] = calculate_hash(base_attributes) if "changed_attributes" in base_attributes: # removed changed attributes if it was added previously for calculating hash. 
del base_attributes["changed_attributes"] return base_attributes def get_origin_attributes(self, base_attributes: Dict[str, Any]) -> None: for attribute_key in list(self.attributes.keys()): attribute_value = self.attributes[attribute_key] if isinstance(attribute_value, list) and len(attribute_value) == 1: attribute_value = attribute_value[0] if isinstance(attribute_value, (list, dict)): inner_attributes = get_inner_attributes(attribute_key, attribute_value) base_attributes.update(inner_attributes) if attribute_key == "self": base_attributes["self_"] = attribute_value continue else: base_attributes[attribute_key] = attribute_value def get_hash(self) -> str: attributes_dict = self.get_attribute_dict() return attributes_dict.get(CustomAttributes.HASH, "") def update_attribute( self, attribute_key: str, attribute_value: Any, change_origin_id: int, previous_breadcrumbs: List[BreadcrumbMetadata], attribute_at_dest: str ) -> None: if not previous_breadcrumbs or previous_breadcrumbs[-1].vertex_id != change_origin_id: previous_breadcrumbs.append(BreadcrumbMetadata(change_origin_id, attribute_at_dest)) self.update_inner_attribute(attribute_key, self.attributes, attribute_value) attribute_key_parts = attribute_key.split(".") if len(attribute_key_parts) == 1: self.attributes[attribute_key] = attribute_value self.changed_attributes[attribute_key] = previous_breadcrumbs return for i in range(len(attribute_key_parts)): key = join_trimmed_strings(char_to_join=".", str_lst=attribute_key_parts, num_to_trim=i) if key.find(".") > -1: self.attributes[key] = attribute_value attribute_value = {attribute_key_parts[len(attribute_key_parts) - 1 - i]: attribute_value} self.changed_attributes[key] = previous_breadcrumbs def update_inner_attribute( self, attribute_key: str, nested_attributes: Union[List[Any], Dict[str, Any]], value_to_update: Any ) -> None: split_key = attribute_key.split(".") i = 1 curr_key = ".".join(split_key[0:i]) if isinstance(nested_attributes, list): if curr_key.isnumeric(): curr_key_int = int(curr_key) if curr_key_int < len(nested_attributes): if not isinstance(nested_attributes[curr_key_int], dict): nested_attributes[curr_key_int] = value_to_update else: self.update_inner_attribute( ".".join(split_key[i:]), nested_attributes[curr_key_int], value_to_update ) else: for inner in nested_attributes: self.update_inner_attribute(curr_key, inner, value_to_update) elif isinstance(nested_attributes, dict): while curr_key not in nested_attributes and i <= len(split_key): i += 1 curr_key = ".".join(split_key[0:i]) if attribute_key in nested_attributes.keys(): nested_attributes[attribute_key] = value_to_update if len(split_key) == 1 and len(curr_key) > 0: nested_attributes[curr_key] = value_to_update elif curr_key in nested_attributes.keys(): self.update_inner_attribute(".".join(split_key[i:]), nested_attributes[curr_key], value_to_update) def get_export_data(self) -> Dict[str, Union[bool, str]]: return {"type": self.block_type, "name": self.name, "path": self.path} def get_base_attributes(self) -> Dict[str, Union[str, List[str], Dict[str, Any]]]: return { CustomAttributes.BLOCK_NAME: self.name, CustomAttributes.BLOCK_TYPE: self.block_type, CustomAttributes.FILE_PATH: self.path, CustomAttributes.CONFIG: self.config, CustomAttributes.LABEL: str(self), CustomAttributes.ID: self.id, CustomAttributes.SOURCE: self.source, } def get_inner_attributes(attribute_key: str, attribute_value: Union[str, List[str], Dict[str, Any]]) -> Dict[str, Any]: inner_attributes: Dict[str, Any] = {} if isinstance(attribute_value, 
list) and len(attribute_value) == 1: attribute_value = attribute_value[0] if isinstance(attribute_value, (dict, list)): inner_attributes[attribute_key] = [None] * len(attribute_value) if isinstance(attribute_value, list) else {} iterator: Union[range, List[str]] = range(len(attribute_value)) if isinstance(attribute_value, list) else list(attribute_value.keys()) for key in iterator: if key != "": inner_key = f"{attribute_key}.{key}" inner_value = attribute_value[key] inner_attributes.update(get_inner_attributes(inner_key, inner_value)) inner_attributes[attribute_key][key] = inner_attributes[inner_key] else: del attribute_value[key] else: inner_attributes[attribute_key] = attribute_value return inner_attributes
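# --- Illustration (not from the original module) ---
# Shows how get_inner_attributes() flattens a nested value into dotted keys.
# The attribute data is invented, and the demo only runs where the checkov
# imports at the top of this file resolve.
if __name__ == '__main__':
    flattened = get_inner_attributes('tags', {'Name': 'my-bucket', 'Env': 'prod'})
    # Both the whole dict and each dotted leaf become addressable keys:
    #   {'tags': {'Name': 'my-bucket', 'Env': 'prod'},
    #    'tags.Name': 'my-bucket',
    #    'tags.Env': 'prod'}
    print(flattened)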
the-stack_0_8143
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # Jens Krüger <[email protected]> # # ***************************************************************************** """Some devices to simulate the PGAA hardware devices.""" from nicos.core import Attach, Override, Param, Readable class PushReader(Readable): """Read back device for the sample pusher sensors. Since one of the sensors must give the inverse of the `moveable` value this will be achieved by setting the parameter `inverse` at the corresponding device in configuration. """ hardware_access = False attached_devices = { 'moveable': Attach('Active device', Readable), } parameters = { 'inverse': Param('Invert read value', type=bool, default=False), } parameter_overrides = { 'unit': Override(default='', mandatory=False), 'fmtstr': Override(default='%d'), } mapping = { 'up': 0, 'down': 1, } fallback = -1 def doRead(self, maxage=0): if self.inverse: return not self._readRaw(maxage) return self._readRaw(maxage) def _readRaw(self, maxage=0): val = self._attached_moveable.read(maxage) return self.mapping.get(val, self.fallback)
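# --- Behaviour sketch (illustrative, not from the original module) ---
# A pair of PushReader devices, one configured with inverse=True, mimics the
# two sample-pusher end switches. The plain-Python lines below only mirror the
# doRead()/_readRaw() logic for illustration and still require the nicos
# imports at the top of this file to resolve.
if __name__ == '__main__':
    mapping, fallback = PushReader.mapping, PushReader.fallback
    for position in ('up', 'down', 'moving'):
        raw = mapping.get(position, fallback)      # what _readRaw() would yield
        print(position, raw, int(not raw))         # plain sensor vs. inverse sensor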
the-stack_0_8144
# -*- coding: utf-8 -*-
"""Language Tour: Generators"""

from typing import List, Tuple, Set, Generator, Dict, Iterable, Iterator

if __name__ == "__main__":

    # Ternary compare
    val: int = 32
    print(val if val >= 0 else -val)

    # List
    var_list: List[int] = [i for i in range(20) if i % 3 > 0]
    # => [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]
    var_list: List[Tuple[int]] = [(i, j) for i in range(2) for j in range(3)]
    # => [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]

    # Set
    var_set: Set[int] = {n**2 for n in range(12)}

    # Dict
    var_set: Dict[int, int] = {n: n**2 for n in range(6)}
    # => {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25}

    # Generator/Iterable/Iterator
    G: Generator[int, None, None] = (n**2 for n in range(12))
    G: Iterable[int] = (n**2 for n in range(12))  # Implied
    G: Iterator[int] = (n**2 for n in range(12))  # Equivalent
    list(G)  # => [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]
    list(G)  # => []  # Because it is only iterable once!

    # NOTE: The type hint here means: [YieldType, SendType, ReturnType]
    # The choice depends on how the function is used.
    def gen() -> Iterable[int]:
        """Generates x^2 from x=0 to x=11."""
        for idx in range(12):
            yield idx**2
            # Instead of returning a single value,
            # we return several of them

    print(*gen())
    # => 0 1 4 9 16 25 36 49 64 81 100 121

    # Example function
    def gen_primes(max_range: int) -> Iterable[int]:
        """Generate primes up to max_range"""
        primes = set()
        for idx in range(2, max_range):
            if all(idx % p > 0 for p in primes):
                primes.add(idx)
                yield idx

    print(*gen_primes(100))
    # => 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47 53 59 61 67 71 73 79 83 89 97

    for prime in gen_primes(100):
        print(prime)
    # 2
    # 3
    # 5
    # 7
    # 11
    # 13
    # 17
    # 19
    # 23
    # 29
    # 31
    # 37
    # 41
    # 43
    # 47
    # 53
    # 59
    # 61
    # 67
    # 71
    # 73
    # 79
    # 83
    # 89
    # 97
the-stack_0_8147
# Copyright (C) 2020 University of Oxford # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import pickle import netCDF4 import numpy as np import pandas as pd from requests import get # opening netCDF4 files via url is not reliable # (it requires the package to be built with OPenDAP support) # we dowload and write to disk the file before opening it def download_MET_file(url, file_name): try: os.remove(file_name) except: pass # dowload the file from url and save it on disk # get request response = get(url) if response.status_code != 200: return False # open in binary mode with open(file_name, "wb") as file: # write to file file.write(response.content) file.close() return True def load_local_data(): # load the variables dict with open("plugins/WEATHER/input/weather_indicators.json", "r") as read_file: weather_indicators = json.load(read_file) # load grid to GADM level 1 dict with open('plugins/WEATHER/input/adm_1_info.pkl', 'rb') as handle: adm_1_info = pickle.load(handle) # load grid to GADM level 2 dict with open('plugins/WEATHER/input/adm_2_info.pkl', 'rb') as handle: adm_2_info = pickle.load(handle) return weather_indicators, adm_1_info, adm_2_info # dowload the weather data for a single variable for all days in daterange # use the adm_1_info and adm_2_info to assign each point in the grid to the right # GID at level 1 or 2. 
the dicts also contains the GADM informations on each GID # returns a pandas dataframe def create_aggr_df(indicator, day, variables, adm_1_info, adm_2_info, logger): source = [] date = [] gid = [] country = [] countrycode = [] adm_area_1 = [] adm_area_2 = [] adm_area_3 = [] avg = [] std = [] samplesize = [] valid_percentage = [] logger.debug("downloading data for {} for {}".format(indicator, day.strftime('%Y-%m-%d'))) URL = "https://metdatasa.blob.core.windows.net/covid19-response/metoffice_global_daily/" temp_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'netCDF4_file.nc') if not download_MET_file("{}{}/{}{}.nc".format(URL, variables[indicator]['folder'], variables[indicator]['file'], day.strftime('%Y%m%d')), file_name=temp_file): return None nc = netCDF4.Dataset(temp_file) data = nc.variables[variables[indicator]['variable']][:].data.reshape(-1) if 'cloudaltitude' in indicator: # remove default values 9*10^36 data[data > 10e20] = np.nan # Level 1 aggregation for area_0 in adm_1_info: for area_1 in adm_1_info[area_0]: idx_list = [point[0] for point in adm_1_info[area_0][area_1]["points"]] to_avg = [data[idx] for idx in idx_list] samplesize.append(len(to_avg)) source.append("MET") date.append(day.strftime('%Y-%m-%d')) gid.append(adm_1_info[area_0][area_1]["gid"]) country.append(adm_1_info[area_0][area_1]["country"]) countrycode.append(adm_1_info[area_0][area_1]["countrycode"]) adm_area_1.append(adm_1_info[area_0][area_1]["adm_area_1"]) adm_area_2.append(adm_1_info[area_0][area_1]["adm_area_2"]) adm_area_3.append(adm_1_info[area_0][area_1]["adm_area_3"]) if 'cloudaltitude' in indicator: avg.append(np.nanmean(to_avg)) std.append(np.nanstd(to_avg, ddof=1)) valid_percentage.append(((~np.isnan(to_avg)).sum()) / (len(to_avg))) else: avg.append(np.mean(to_avg)) std.append(np.std(to_avg, ddof=1)) # Level 2 aggregation for area_0 in adm_2_info: for area_1 in adm_2_info[area_0]: for area_2 in adm_2_info[area_0][area_1]: idx_list = [point[0] for point in adm_2_info[area_0][area_1][area_2]["points"]] to_avg = [data[idx] for idx in idx_list] samplesize.append(len(to_avg)) source.append("MET") date.append(day.strftime('%Y-%m-%d')) gid.append(adm_2_info[area_0][area_1][area_2]["gid"]) country.append(adm_2_info[area_0][area_1][area_2]["country"]) countrycode.append(adm_2_info[area_0][area_1][area_2]["countrycode"]) adm_area_1.append(adm_2_info[area_0][area_1][area_2]["adm_area_1"]) adm_area_2.append(adm_2_info[area_0][area_1][area_2]["adm_area_2"]) adm_area_3.append(adm_2_info[area_0][area_1][area_2]["adm_area_3"]) if 'cloudaltitude' in indicator: avg.append(np.nanmean(to_avg)) std.append(np.nanstd(to_avg, ddof=1)) valid_percentage.append(((~np.isnan(to_avg)).sum()) / (len(to_avg))) else: avg.append(np.mean(to_avg)) std.append(np.std(to_avg, ddof=1)) if 'cloudaltitude' in indicator: d = {'source': source, 'date': date, 'gid': gid, 'country': country, 'countrycode': countrycode, 'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': adm_area_3, 'samplesize': samplesize, indicator+'_valid': valid_percentage, indicator+'_avg': avg, indicator+'_std': std, } else: d = {'source': source, 'date': date, 'gid': gid, 'country': country, 'countrycode': countrycode, 'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': adm_area_3, 'samplesize': samplesize, indicator+'_avg': avg, indicator+'_std': std, } try: os.remove(temp_file) except: pass return pd.DataFrame(data=d)
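# --- Usage sketch (illustrative, not from the original plugin) ---
# Builds a month of aggregated weather data with the helpers above. Each call
# downloads one daily netCDF file from the MET blob storage, and the indicator
# name 'average_temperature' is a placeholder: the real keys come from
# plugins/WEATHER/input/weather_indicators.json.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('weather-demo')

    weather_indicators, adm_1_info, adm_2_info = load_local_data()
    frames = []
    for day in pd.date_range('2020-04-01', '2020-04-30'):
        df = create_aggr_df('average_temperature', day, weather_indicators,
                            adm_1_info, adm_2_info, logger)
        if df is not None:  # None means the download for that day failed
            frames.append(df)
    monthly = pd.concat(frames, ignore_index=True)
    print(monthly.head())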
the-stack_0_8148
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized import numpy as np import six from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import tpu_strategy from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import random_seed from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.util import nest _RANDOM_SEED = 1337 _EVAL_STEPS = 20 _GLOBAL_BATCH_SIZE = 64 # Note: Please make sure the tests in this file are also covered in # keras_backward_compat_test for features that are supported with both APIs. all_strategies = [ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, strategy_combinations.tpu_strategy, # steps_per_run=2 strategy_combinations.tpu_strategy_one_step, ] def eager_mode_test_configuration(): return combinations.combine( mode='eager', use_numpy=[True, False], use_validation_data=[True, False]) def graph_mode_test_configuration(): return combinations.combine( mode='graph', use_numpy=[True, False], use_validation_data=[True, False]) def all_strategy_and_input_config_combinations(): return (combinations.times( combinations.combine( distribution=all_strategies, run_distributed=[True, False]), eager_mode_test_configuration() + graph_mode_test_configuration())) def strategy_minus_tpu_and_input_config_combinations_eager(): return (combinations.times( combinations.combine( distribution=strategy_combinations.strategies_minus_tpu), eager_mode_test_configuration())) def strategies_for_embedding_models(): """Returns distribution strategies to test for embedding models. Since embedding models take longer to train, we disregard DefaultStrategy in order to prevent testing timeouts. 
""" return [ s for s in all_strategies if s.required_tpu or s.required_gpus or s is strategy_combinations.one_device_strategy ] def test_combinations_for_embedding_model(): # TODO(sourabhbajaj): Enable tests for eager mode eager_mode_strategies = [ s for s in strategies_for_embedding_models() if not s.required_tpu ] return (combinations.times( combinations.combine( distribution=strategies_for_embedding_models(), run_distributed=[True, False]), (graph_mode_test_configuration())) + combinations.times( combinations.combine( distribution=eager_mode_strategies, run_distributed=[False]), (eager_mode_test_configuration()))) def test_combinations_with_tpu_strategies(): tpu_strategies = [ strategy_combinations.tpu_strategy, strategy_combinations.tpu_strategy_one_step ] return (combinations.times( combinations.combine(distribution=tpu_strategies), graph_mode_test_configuration())) class MaybeDistributionScope(object): """Provides a context allowing no distribution strategy.""" def __init__(self, distribution): self._distribution = distribution self._scope = None def __enter__(self): if self._distribution: self._scope = self._distribution.scope() self._scope.__enter__() def __exit__(self, exc_type, value, traceback): if self._distribution: self._scope.__exit__(exc_type, value, traceback) self._scope = None def batch_wrapper(dataset, batch_size, repeat=None): if repeat: dataset = dataset.repeat(repeat) return dataset.batch(batch_size) def get_batch_size(global_batch_size, distribution): batch_size = global_batch_size # TODO(b/118776054): Use global batch size for Keras/DS support. use_per_core_batch_size = ( distribution and not distributed_training_utils.global_batch_size_supported(distribution)) if use_per_core_batch_size: batch_size //= distribution.num_replicas_in_sync return batch_size def get_data_size(data): """Gets the size of data in list, tuple, dict, or a numpy array.""" assert isinstance(data, (np.ndarray, list, dict, tuple)) if isinstance(data, np.ndarray): return len(data) if isinstance(data, (list, tuple)): return len(data[0]) return len(six.next(six.itervalues(data))) def get_shapes(data): shapes = None if all(hasattr(x, 'shape') for x in nest.flatten(data)): shapes = nest.map_structure(lambda x: x.shape, data) return shapes def get_correctness_test_inputs(use_numpy, use_validation_data, with_distribution, x_train, y_train, x_eval, y_eval, x_predict, training_epochs): """Generates the inputs for correctness check when enable Keras with DS.""" global_batch_size = _GLOBAL_BATCH_SIZE batch_size = get_batch_size(global_batch_size, with_distribution) if use_numpy: training_inputs = { 'batch_size': batch_size, 'x': x_train, 'y': y_train, 'epochs': training_epochs, 'shuffle': False, } if use_validation_data: eval_inputs = None training_inputs['validation_data'] = (x_eval, y_eval) else: eval_inputs = { 'batch_size': batch_size, 'x': x_eval, 'y': y_eval, } predict_inputs = {'x': x_predict} else: training_data_size = get_data_size(x_train) # For dataset inputs, we do not pass batch_size to # keras.fit/evaluate/predict. The batch size is part of the dataset. 
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train)) x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs) steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size)) training_inputs = { 'batch_size': None, 'x': x, 'y': None, 'epochs': training_epochs, 'shuffle': False, 'steps_per_epoch': steps_per_epoch } if use_validation_data: eval_inputs = None # Remove the eval_inputs eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval)) x = batch_wrapper(eval_dataset, batch_size) training_inputs['validation_data'] = x training_inputs['validation_steps'] = 5 else: eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval)) x = batch_wrapper(eval_dataset, batch_size) eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size)) eval_inputs = { 'batch_size': None, 'x': x, 'y': None, 'steps': eval_steps, } predict_batch_size = get_batch_size( get_data_size(x_predict), with_distribution) predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict) predict_dataset = batch_wrapper(predict_dataset, predict_batch_size) predict_inputs = { 'steps': 1, 'x': predict_dataset, } return training_inputs, eval_inputs, predict_inputs def fit_eval_and_predict(initial_weights, input_fn, model_fn, run_distributed=None, distribution=None, is_stateful_model=False): """Generates results for fit/predict/evaluate for given model.""" training_inputs, eval_inputs, predict_inputs = input_fn() model = model_fn( run_distributed=run_distributed, initial_weights=initial_weights, distribution=distribution, input_shapes=get_shapes(training_inputs['x'])) result = {} result['training_history_1'] = model.fit(**training_inputs).history if eval_inputs is not None: result['eval_result_1'] = model.evaluate(**eval_inputs) result['weights_1'] = model.get_weights() if predict_inputs is not None: # Check correctness of the result of predict() invoked # multiple times -- as for stateful models, result of # predict may differ for each batch. predict_length = 1 if is_stateful_model: predict_length = 3 for i in range(predict_length): result_key = 'predict_result_{}'.format(i) result[result_key] = model.predict(**predict_inputs) # Train and eval again to mimic user's flow. result['training_history_2'] = model.fit(**training_inputs).history if eval_inputs is not None: result['eval_result_2'] = model.evaluate(**eval_inputs) result['weights_2'] = model.get_weights() return result def compare_results(results_with_ds, results_without_ds, distribution, testcase, partial_last_batch=None): """Compares results of model compiled with/without distribution strategy.""" if partial_last_batch == 'train_and_eval': # We relax the tolerence a lot in the partial last batch case as # 1. the examples in uneven batches may have different weights when # applying the gradients in the distributed case. # 2. TF Keras and TF Keras DS have different ways to handle the case when # training with epochs > 1 with numpy inputs. In TF Keras, every epoch # may have a partial batch. While in TF Keras DS, as we convert # numpy inputs into dataset, it will do a repeat() first and calculate # steps_per_epoch, so it will at most have one partial batch. This # makes the 1-CPU result even different. 
default_tolerance = 1e-3 relaxed_tolerance = 1e-3 else: default_tolerance = 1e-5 relaxed_tolerance = 1e-4 def _get_compare_result_tolerance(key): """Returns tolerance to compare results.""" # TODO(b/119257215): For MirroredStrategy, weights are not exactly the same, # so use larger tolerance for now. Predict should be related to weights. if (isinstance(distribution, (mirrored_strategy.MirroredStrategy, distribute_lib._DefaultDistributionStrategy)) and # pylint: disable=protected-access key.startswith(('weights_1', 'weights_2', 'predict_result'))): return relaxed_tolerance return default_tolerance for key in sorted(results_with_ds.keys()): if (key.startswith('training_history') and isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and distribution.extended.steps_per_run > 1): # TODO(b/119894254): Enable this test for all cases once the # underlying bug is fixed. continue tolerance = _get_compare_result_tolerance(key) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. if partial_last_batch is not None: if key.startswith('eval_result'): results_with_ds[key] = results_with_ds[key][1:] results_without_ds[key] = results_without_ds[key][1:] if key.startswith('training_history'): results_with_ds[key]['val_loss'] = 0 results_without_ds[key]['val_loss'] = 0 testcase.assertAllClose( results_with_ds[key], results_without_ds[key], atol=tolerance, rtol=tolerance, msg='Fail to assert {}.'.format(key)) def should_skip_tpu_with_eager(distribution): return (context.executing_eagerly() and isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1))) class LearningRateBatchScheduler(keras.callbacks.Callback): """Scheduler that dynamically sets the learning rate of model.""" def __init__(self, update_freq=None): self._update_freq = update_freq def on_batch_begin(self, batch, logs=None): if self._update_freq and batch % self._update_freq != 0: return # To avoid divergence, limit the value range. lr = 0.001 * (batch % 10) keras.backend.set_value(self.model.optimizer.lr, lr) class TestDistributionStrategyCorrectnessBase(test.TestCase, parameterized.TestCase): """Model agnostic testing infra to test correctness of Keras models.""" def set_up_test_config(self, use_numpy=False, use_validation_data=False, with_batch_norm=False): self.use_numpy = use_numpy self.use_validation_data = use_validation_data self.with_batch_norm = with_batch_norm keras.backend.set_image_data_format('channels_last') np.random.seed(_RANDOM_SEED) random_seed.set_random_seed(_RANDOM_SEED) def get_data(self): num_samples = 10000 x_train = np.random.randint(0, 2, num_samples) x_train = np.reshape(x_train, (num_samples, 1)) y_train = x_train return (x_train.astype('float32'), y_train.astype('float32'), None) def get_data_with_partial_last_batch(self): raise NotImplementedError def get_data_with_partial_last_batch_eval(self): raise NotImplementedError def get_input_for_correctness_test(self, **kwargs): """Generates inputs that are dictionaries. We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. 
Arguments: **kwargs: key word arguments about how to create the input dictionaries Returns: Three dictionaries representing the input for fit(), evalutate() and predict() """ return get_correctness_test_inputs(**kwargs) def get_model(self, distribution=None, run_distributed=None, input_shapes=None): raise NotImplementedError def skip_unsupported_test_configuration(self, distribution, run_distributed): if should_skip_tpu_with_eager(distribution) and run_distributed: self.skipTest( 'TPUStrategy does not support eager mode with run_distributed.') return def run_correctness_test(self, distribution, use_numpy, use_validation_data, run_distributed=None, with_batch_norm=False, is_stateful_model=False, partial_last_batch=None, training_epochs=2): with self.cached_session(): self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm) self.skip_unsupported_test_configuration(distribution, run_distributed) if partial_last_batch == 'eval': x_train, y_train, x_eval, y_eval, x_predict = ( self.get_data_with_partial_last_batch_eval()) elif partial_last_batch == 'train_and_eval': x_train, y_train, x_eval, y_eval, x_predict = ( self.get_data_with_partial_last_batch()) else: x_train, y_train, x_predict = self.get_data() x_eval = x_train y_eval = y_train # The model is built once and the initial weights are saved. # This is used to initialize the model for both the distribution and # non-distribution run. model = self.get_model( run_distributed=run_distributed, input_shapes=get_shapes(x_train)) initial_weights = model.get_weights() ds_input_fn = functools.partial( self.get_input_for_correctness_test, use_numpy=use_numpy, use_validation_data=use_validation_data, with_distribution=distribution, x_train=x_train, y_train=y_train, x_eval=x_eval, y_eval=y_eval, x_predict=x_predict, training_epochs=training_epochs) nods_input_fn = functools.partial( self.get_input_for_correctness_test, use_numpy=use_numpy, use_validation_data=use_validation_data, with_distribution=None, x_train=x_train, y_train=y_train, x_eval=x_eval, y_eval=y_eval, x_predict=x_predict, training_epochs=training_epochs) results_with_ds = fit_eval_and_predict( initial_weights, input_fn=ds_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=distribution, is_stateful_model=is_stateful_model) results_without_ds = fit_eval_and_predict( initial_weights, input_fn=nods_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=None, is_stateful_model=is_stateful_model) # First, special case, for multi-replica distributed training, batch # norm is not aggregated globally. So it is expected to have different # weights. if (self.with_batch_norm and distribution.num_replicas_in_sync > 1): with self.assertRaises(AssertionError): compare_results( results_with_ds, results_without_ds, distribution, testcase=self, partial_last_batch=partial_last_batch) else: compare_results( results_with_ds, results_without_ds, distribution, testcase=self, partial_last_batch=partial_last_batch) def get_input_for_dynamic_lr_test(self, **kwargs): """Generates inputs that are dictionaries. We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. 
Arguments: **kwargs: key word arguments about how to create the input dictionaries Returns: Three dictionaries representing the input for fit(), evalutate() and predict() """ training_input = kwargs return training_input, None, None def run_dynamic_lr_test(self, distribution, run_distributed=None): with self.cached_session(): self.set_up_test_config() self.skip_unsupported_test_configuration(distribution, run_distributed) x_train, y_train, _ = self.get_data() model = self.get_model( run_distributed=run_distributed, input_shapes=get_shapes(x_train)) initial_weights = model.get_weights() update_freq = None if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and distribution.extended.steps_per_run > 1): # For TPUStrategy with steps_per_run > 1, the callback is not invoked # every step. So, to compare the CPU/TPU, we let the CPU to behave the # same as TPU. update_freq = distribution.extended.steps_per_run training_epochs = 2 global_batch_size = 64 ds_batch_size = get_batch_size(global_batch_size, distribution) nods_batch_size = get_batch_size(global_batch_size, None) ds_input_fn = functools.partial( self.get_input_for_dynamic_lr_test, x=x_train, y=y_train, batch_size=ds_batch_size, shuffle=False, epochs=training_epochs, callbacks=[LearningRateBatchScheduler(update_freq)], validation_data=(x_train, y_train)) nods_input_fn = functools.partial( self.get_input_for_dynamic_lr_test, x=x_train, y=y_train, batch_size=nods_batch_size, shuffle=False, epochs=training_epochs, callbacks=[LearningRateBatchScheduler(update_freq)], validation_data=(x_train, y_train)) results_with_ds = fit_eval_and_predict( initial_weights, input_fn=ds_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=distribution) results_without_ds = fit_eval_and_predict( initial_weights, input_fn=nods_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=None) compare_results( results_with_ds, results_without_ds, distribution, testcase=self) class TestDistributionStrategyEmbeddingModelCorrectnessBase( TestDistributionStrategyCorrectnessBase): """Base class to test correctness of Keras models with embedding layers.""" def get_data(self, count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS), min_words=5, max_words=10, max_word_id=19, num_classes=2): distribution = [] for _ in range(num_classes): dist = np.abs(np.random.randn(max_word_id)) dist /= np.sum(dist) distribution.append(dist) features = [] labels = [] for _ in range(count): label = np.random.randint(0, num_classes, size=1)[0] num_words = np.random.randint(min_words, max_words, size=1)[0] word_ids = np.random.choice( max_word_id, size=num_words, replace=True, p=distribution[label]) word_ids = word_ids labels.append(label) features.append(word_ids) features = keras.preprocessing.sequence.pad_sequences( features, maxlen=max_words) x_train = np.asarray(features, dtype=np.float32) y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1)) x_predict = x_train[:_GLOBAL_BATCH_SIZE] return x_train, y_train, x_predict if __name__ == '__main__': test.main()
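# --- Subclass sketch (illustrative, not from the original test file) ---
# Outline of how a concrete correctness test builds on the base classes above.
# The class name and layer sizes are invented, and in a real test file the
# class would be defined before the `if __name__ == '__main__'` guard.
#
#     class TestDistributionStrategyTinyDenseCorrectness(
#             TestDistributionStrategyCorrectnessBase):
#
#         def get_model(self, initial_weights=None, distribution=None,
#                       run_distributed=None, input_shapes=None):
#             with MaybeDistributionScope(distribution):
#                 model = keras.Sequential([
#                     keras.layers.Dense(10, activation='relu', input_shape=(1,)),
#                     keras.layers.Dense(1),
#                 ])
#                 model.compile(optimizer='sgd', loss='mse', metrics=['mse'])
#                 if initial_weights is not None:
#                     model.set_weights(initial_weights)
#             return model
#
#         @combinations.generate(all_strategy_and_input_config_combinations())
#         def test_correctness(self, distribution, use_numpy,
#                              use_validation_data, run_distributed):
#             self.run_correctness_test(distribution, use_numpy,
#                                       use_validation_data, run_distributed)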
the-stack_0_8152
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 Raphael Michel and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # import logging from django import forms from django.conf import settings from django.utils.translation import gettext as _ from oauth2_provider.exceptions import FatalClientError, OAuthToolkitError from oauth2_provider.forms import AllowForm from oauth2_provider.settings import oauth2_settings from oauth2_provider.views import ( AuthorizationView as BaseAuthorizationView, RevokeTokenView as BaseRevokeTokenView, TokenView as BaseTokenView, ) from pretix.api.models import OAuthApplication from pretix.base.models import Organizer logger = logging.getLogger(__name__) class OAuthAllowForm(AllowForm): organizers = forms.ModelMultipleChoiceField( queryset=Organizer.objects.none(), widget=forms.CheckboxSelectMultiple ) def __init__(self, *args, **kwargs): user = kwargs.pop('user') scope = kwargs.pop('scope') super().__init__(*args, **kwargs) self.fields['organizers'].queryset = Organizer.objects.filter( pk__in=user.teams.values_list('organizer', flat=True)) if scope == 'profile': del self.fields['organizers'] class AuthorizationView(BaseAuthorizationView): template_name = "pretixcontrol/auth/oauth_authorization.html" form_class = OAuthAllowForm def get_form_kwargs(self): kwargs = super().get_form_kwargs() kwargs['user'] = self.request.user kwargs['scope'] = self.request.GET.get('scope') return kwargs def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx['settings'] = settings return ctx def validate_authorization_request(self, request): require_approval = request.GET.get("approval_prompt", oauth2_settings.REQUEST_APPROVAL_PROMPT) if require_approval != 'force' and request.GET.get('scope') != 'profile': raise FatalClientError('Combnination of require_approval and scope values not allowed.') return super().validate_authorization_request(request) def create_authorization_response(self, request, scopes, credentials, allow, organizers=None): credentials["organizers"] = organizers or [] return super().create_authorization_response(request, scopes, credentials, allow) def form_valid(self, form): client_id = form.cleaned_data["client_id"] application = OAuthApplication.objects.get(client_id=client_id) credentials = { "client_id": form.cleaned_data.get("client_id"), "redirect_uri": form.cleaned_data.get("redirect_uri"), "response_type": form.cleaned_data.get("response_type", None), "state": form.cleaned_data.get("state", 
None), } scopes = form.cleaned_data.get("scope") allow = form.cleaned_data.get("allow") try: uri, headers, body, status = self.create_authorization_response( request=self.request, scopes=scopes, credentials=credentials, allow=allow, organizers=form.cleaned_data.get("organizers") ) except OAuthToolkitError as error: return self.error_response(error, application) self.success_url = uri logger.debug("Success url for the request: {0}".format(self.success_url)) msgs = [ _('The application "{application_name}" has been authorized to access your account.').format( application_name=application.name ) ] self.request.user.send_security_notice(msgs) self.request.user.log_action('pretix.user.oauth.authorized', user=self.request.user, data={ 'application_id': application.pk, 'application_name': application.name, }) return self.redirect(self.success_url, application) class TokenView(BaseTokenView): pass class RevokeTokenView(BaseRevokeTokenView): pass
the-stack_0_8153
# Adapted from Sebastian Noack's python-goto, originally licensed under the # Unlicence and re-licenced under Apache 2.0 as part of Pomagma. import pytest from goto import goto, label, with_goto CODE = '''\ i = 0 result = [] label.start if i == 10: goto.end result.append(i) i += 1 goto.start label.end ''' EXPECTED = list(range(10)) def test_range_as_code(): ns = {} exec(with_goto(compile(CODE, '', 'exec')), ns) assert ns['result'] == EXPECTED def test_range_as_function(): ns = {} exec('\n'.join( ['def func():'] + ['\t' + x for x in CODE.splitlines() + ['return result']] ), ns) assert with_goto(ns['func'])() == EXPECTED def test_jump_out_of_loop(): @with_goto def func(): for i in range(10): goto.end label.end return i assert func() == 0 def test_jump_into_loop(): def func(): for i in range(10): label.loop goto.loop pytest.raises(SyntaxError, with_goto, func) def test_jump_out_of_nested_4_loops(): @with_goto def func(): for i in range(2): for j in range(2): for k in range(2): for m in range(2): goto.end label.end return (i, j, k, m) assert func() == (0, 0, 0, 0) def test_jump_out_of_nested_5_loops(): def func(): for i in range(2): for j in range(2): for k in range(2): for m in range(2): for n in range(2): goto.end label.end return (i, j, k, m, n) pytest.raises(SyntaxError, with_goto, func) def test_jump_across_loops(): def func(): for i in range(10): goto.other_loop for i in range(10): label.other_loop pytest.raises(SyntaxError, with_goto, func) def test_jump_out_of_try_block(): @with_goto def func(): try: rv = None goto.end except: rv = 'except' finally: rv = 'finally' label.end return rv assert func() is None def test_jump_into_try_block(): def func(): try: label.block except: pass goto.block pytest.raises(SyntaxError, with_goto, func) def test_jump_to_unkown_label(): def func(): goto.unknown pytest.raises(SyntaxError, with_goto, func) def test_function_is_copy(): def func(): pass func.foo = 'bar' newfunc = with_goto(func) assert newfunc is not func assert newfunc.foo == 'bar'
the-stack_0_8154
import numpy as np import pandas as pd import tensorflow as tf import math from sklearn.cluster import KMeans import Loaddata from numpy import random import time from datetime import date import matplotlib.pyplot as plt import os from pandas import DataFrame, concat import multiprocessing as mp class LSTM_double: # 定义常量 def __init__(self, data): self.rnn_unit = 300 self.input_size = 100 self.output_size = 1 self.lr = 0.00006 self.time_step = 1 self.batch_size = 1 self.data = self.series_to_supervised(data, 100) self.train_begin = 0 self.train_end = len(self.data) self.test_begin = len(self.data)-1 self.weights = { 'in': tf.Variable(tf.random_normal([self.input_size, self.rnn_unit])), 'out': tf.Variable(tf.random_normal([self.rnn_unit, self.output_size])) } self.biases = { 'in': tf.Variable(tf.constant(0.1, shape=[self.rnn_unit, ])), 'out': tf.Variable(tf.constant(0.1, shape=[1, ])) } # 定义分割函数 def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True): n_vars = 1 if type(data) is list else data.shape[1] df = DataFrame(data) cols, names = list(), list() for i in range(n_in, 0, -1): cols.append(df.shift(i)) names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] for i in range(0, n_out): cols.append(df.shift(-i)) if i == 0: names += [('var%d(t)' % (j+1)) for j in range(n_vars)] else: names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] agg = concat(cols, axis=1) agg.columns = names if dropnan: agg.dropna(inplace=True) return agg.values # 获取训练集 def get_train_data(self): batch_index = [] data_train = self.data[self.train_begin:self.train_end] normalized_train_data = data_train/1e8 train_x, train_y = [], [] # 训练集 for i in range(len(normalized_train_data)-self.time_step): if i % self.batch_size == 0: batch_index.append(i) x = normalized_train_data[i:i+self.time_step, :100] y = normalized_train_data[i:i+self.time_step, 100:] train_x.append(x.tolist()) train_y.append(y.tolist()) batch_index.append((len(normalized_train_data)-self.time_step)) return batch_index, train_x, train_y # 获取测试集 def get_test_data(self): data_test = self.data[self.test_begin:] normalized_test_data = data_test/1e8 size = (len(normalized_test_data) + self.time_step)//self.time_step # 有size个sample test_x, test_y = [], [] for i in range(size-1): x = normalized_test_data[i * self.time_step:(i+1)*self.time_step, :100] y = normalized_test_data[i * self.time_step:(i+1)*self.time_step, 100] test_x.append(x.tolist()) test_y.extend(y) test_x.append( (normalized_test_data[(i+1)*self.time_step:, :100]).tolist()) test_y.extend( (normalized_test_data[(i+1)*self.time_step:, 100]).tolist()) return test_x, test_y # ——————————————————定义神经网络变量—————————————————— def lstm(self, X): self.batch_size = tf.shape(X)[0] self.time_step = tf.shape(X)[1] w_in = self.weights['in'] b_in = self.biases['in'] # 将tensor转成2维进行计算,计算后的结果作为隐藏层的输入 input = tf.reshape(X, [-1, self.input_size]) input_rnn = tf.matmul(input, w_in)+b_in # 将tensor转成3维,作为lstm cell的输入 input_rnn = tf.reshape(input_rnn, [-1, self.time_step, self.rnn_unit]) cell = tf.nn.rnn_cell.LSTMCell(self.rnn_unit) init_state = cell.zero_state(self.batch_size, dtype=tf.float32) # output_rnn是记录lstm每个输出节点的结果,final_states是最后一个cell的结果 output_rnn, final_states = tf.nn.dynamic_rnn( cell, input_rnn, initial_state=init_state, dtype=tf.float32) output = tf.reshape(output_rnn, [-1, self.rnn_unit]) # 作为输出层的输入 w_out = self.weights['out'] b_out = self.biases['out'] pred = tf.matmul(output, w_out)+b_out pred = tf.reshape(pred, [-1, self.output_size]) return pred, final_states # 
——————————————————训练模型—————————————————— def train_lstm(self, num_epochs=40, numb_sub=1,numb_class=1,continue_train=False,class_people='purchase'): X = tf.placeholder(tf.float32, shape=[None, 1, 100]) Y = tf.placeholder(tf.float32, shape=[None, 1, 1]) batch_index, train_x, train_y = self.get_train_data() with tf.variable_scope("sec_lstm"): pred, _ = self.lstm(X) # 损失函数 loss = tf.reduce_mean( tf.square(tf.reshape(pred, [-1])-tf.reshape(Y, [-1]))) train_op = tf.train.AdamOptimizer(self.lr).minimize(loss) saver = tf.train.Saver(tf.global_variables(), max_to_keep=15) if continue_train==True: module_file = tf.train.latest_checkpoint('model_save_'+class_people+'_'+ str(numb_sub)+'_'+str(numb_class)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if continue_train==True: saver.restore(sess, module_file) # 重复训练 for i in range(num_epochs): for step in range(len(batch_index)-1): _, loss_ = sess.run([train_op, loss], feed_dict={ X: train_x[batch_index[step]:batch_index[step+1]], Y: train_y[batch_index[step]:batch_index[step+1]]}) print(i+1, loss_) if ((i+1) % num_epochs) == 0: print("保存模型:", saver.save(sess, 'model_save_'+class_people+'_' + str(numb_sub)+'_'+str(numb_class)+'/modle.ckpt', global_step=i)) # ————————————————预测模型———————————————————— def prediction(self, numb_sub=1,numb_class=1,class_people='purchase'): self.time_step = 1 self.input_size = 100 self.output_size = 1 X = tf.placeholder(tf.float32, shape=[ None, self.time_step, self.input_size]) Y = tf.placeholder(tf.float32, shape=[ None, self.time_step, self.output_size]) test_x, test_y = self.get_test_data() with tf.variable_scope("sec_lstm", reuse=tf.AUTO_REUSE): pred, _ = self.lstm(X) saver = tf.train.Saver(tf.global_variables()) with tf.Session() as sess: # 参数恢复 module_file = tf.train.latest_checkpoint( 'model_save_'+class_people+'_'+str(numb_sub)+'_'+str(numb_class)) saver.restore(sess, module_file) test_x = test_x[:1] test_x = [a[0] for a in test_x] test_x = np.array(test_x) test_x[:, :99] = test_x[:, 1:] test_x[:, 99:] = test_y[-1] test_predict = [] for step in range(30): prob = sess.run(pred, feed_dict={X: [test_x]}) predict = prob.reshape(-1) test_predict.extend(prob) test_x[:, :99] = test_x[:, 1:] test_x[:, 99:] = prob[-1] test_predict = np.array(test_predict) test_predict = test_predict[:, 0] test_predict = test_predict.flatten() test_predict = np.array(test_predict)*1e8 print(test_predict) return test_predict class k_mean(object): def __init__(self, data): self.x_train = data def k_mean_divide(self, cluster_num): kmeans = KMeans(n_clusters=cluster_num, random_state=0).fit(self.x_train) divide_labels = kmeans.labels_ divide_class = {} for i in range(cluster_num): divide_answer = (divide_labels == i) divide = [] for j in range(len(divide_labels)): if divide_answer[j] == True: divide.append(j) divide_class['cluster'+str(i)] = np.array(divide)+1 return divide_class class genetic(object): def getEncoding(self, popSize, chromLength): # 生成种群 pop = random.randint(0, 2, size=(popSize, chromLength)) return pop def binary2decimal(self, pop, chromLength_type, chromLength): row = pop.shape[0] chromLength_length = len(chromLength_type) - 1 tempfinal = np.zeros((row, chromLength_length)) position_sum = np.cumsum(chromLength_type) for i in range(row): for j in range(chromLength_length): t = 0 for k in range(position_sum[j], position_sum[j+1]): t += pop[i, k]*(math.pow(2, k - position_sum[j])) tempfinal[i, j] = t tempfinal[:, 0] = tempfinal[:, 0]+1 tempfinal[:, 1:] = tempfinal[:, 1:]/(math.pow(2, 8)-1)*5 return tempfinal 
    def multiprocess_fitness_purchase(self, j):  # evaluated in parallel
        multiple_time = np.hstack((self.tempfinal[j, 1], np.tile(
            self.tempfinal[j, 2], 7), np.tile(self.tempfinal[j, 3], 12)))  # concatenate the weight multipliers
        for k in range(4, self.tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, self.tempfinal[j, k]))
        user_profile_onehot = self.user_profile_onehot * multiple_time  # scale up the weights of selected feature blocks
        model_kmean = k_mean(user_profile_onehot)  # clustering
        divide_class = model_kmean.k_mean_divide(int(self.tempfinal[j, 0]))
        user_balance = Loaddata.UserBalance()
        purchase_predict_class = []
        purchase_test_class = []
        for i in range(len(divide_class)):  # feed each cluster into the network separately
            print('population ' + str(j+1) + ', cluster ' + str(i+1))
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])
            user_balance.CalculateDayRedeemList(
                divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataUsedInPredict()
            purchase_test, redeem_test = user_balance.GetTestData()
            purchase_model = LSTM_double(purchase_train.reshape((-1, 1)))
            purchase_model.train_lstm(numb_sub=j+1, numb_class=i+1)
            purchase_predict = purchase_model.prediction(numb_sub=j+1, numb_class=i+1)
            tf.reset_default_graph()
            plt.plot(purchase_predict, 'b')
            plt.plot(purchase_test, 'g')
            if not os.path.exists('out_lstm_double/'):
                os.makedirs('out_lstm_double/')
            plt.savefig('out_lstm_double/purchase_the_{}_times_the_{}_gene_the_{}_class.png'.format(
                str(self.times_calc), str(j+1), str(i+1)))
            plt.close()
            purchase_predict_class.append(purchase_predict)
            purchase_test_class.append(purchase_test)
        purchase_loss_value = np.mean(abs(np.array(purchase_predict_class).sum(
            axis=0) - np.array(purchase_test_class).sum(axis=0))/(np.array(purchase_test_class).sum(axis=0)))
        return 1/purchase_loss_value

    def fitness_purchase(self, tempfinal, user_profile_onehot, times_calc):  # fitness
        self.user_profile_onehot = user_profile_onehot
        self.tempfinal = tempfinal
        self.times_calc = times_calc
        pool = mp.Pool(processes=tempfinal.shape[0])
        purchase_loss_value = pool.map(
            self.multiprocess_fitness_purchase, range(tempfinal.shape[0]))
        pool.close()
        pool.join()
        return np.squeeze(purchase_loss_value)

    def fitness_predict_purchase(self, length_best, tempfinal, user_profile_onehot, user_balance):
        multiple_time = np.hstack((tempfinal[0, 1], np.tile(
            tempfinal[0, 2], 7), np.tile(tempfinal[0, 3], 12)))  # concatenate the weight multipliers
        for k in range(4, tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, tempfinal[0, k]))
        user_profile_onehot = user_profile_onehot * multiple_time  # scale up the weights of selected feature blocks
        model_kmean = k_mean(user_profile_onehot)  # clustering
        divide_class = model_kmean.k_mean_divide(int(tempfinal[0, 0]))
        purchase_predict_class = []
        for i in range(len(divide_class)):  # feed each cluster into the network separately
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])
            user_balance.CalculateDayRedeemList(divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataAll()
            purchase_model = LSTM_double(purchase_train.reshape((-1, 1)))
            purchase_model.train_lstm(num_epochs=10, numb_sub=length_best, numb_class=i+1, continue_train=True)
            purchase_predict = purchase_model.prediction(numb_sub=length_best, numb_class=i+1)
            tf.reset_default_graph()
            purchase_predict_class.append(purchase_predict)
        purchase_predict_return = np.array(purchase_predict_class).sum(axis=0)
        return purchase_predict_return

    def multiprocess_fitness_redeem(self, j):
        multiple_time = np.hstack((self.tempfinal[j, 1], np.tile(
            self.tempfinal[j, 2], 7), np.tile(self.tempfinal[j, 3], 12)))  # concatenate the weight multipliers
        for k in range(4, self.tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, self.tempfinal[j, k]))
        user_profile_onehot = self.user_profile_onehot * multiple_time  # scale up the weights of selected feature blocks
        model_kmean = k_mean(user_profile_onehot)  # clustering
        divide_class = model_kmean.k_mean_divide(int(self.tempfinal[j, 0]))
        user_balance = Loaddata.UserBalance()
        redeem_predict_class = []
        redeem_test_class = []
        for i in range(len(divide_class)):  # feed each cluster into the network separately
            print('population ' + str(j+1) + ', cluster ' + str(i+1))
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])  # most of the runtime is spent here
            user_balance.CalculateDayRedeemList(
                divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataUsedInPredict()
            purchase_test, redeem_test = user_balance.GetTestData()
            redeem_model = LSTM_double(redeem_train.reshape((-1, 1)))
            redeem_model.lr = 0.0001
            redeem_model.train_lstm(num_epochs=60, numb_sub=j+1, numb_class=i+1, class_people='redeem')
            redeem_predict = redeem_model.prediction(numb_sub=j+1, numb_class=i+1, class_people='redeem')
            tf.reset_default_graph()
            plt.plot(redeem_predict, 'b')
            plt.plot(redeem_test, 'g')
            plt.savefig('out_lstm_double/redeem_the_{}_times_the_{}_gene_the_{}_class.png'.format(
                str(self.times_calc), str(j+1), str(i+1)))
            plt.close()
            redeem_predict_class.append(redeem_predict)
            redeem_test_class.append(redeem_test)
        redeem_loss_value = np.mean(abs(np.array(redeem_predict_class).sum(
            axis=0) - np.array(redeem_test_class).sum(axis=0))/(np.array(redeem_test_class).sum(axis=0)))
        return 1/redeem_loss_value

    def fitness_redeem(self, tempfinal, user_profile_onehot, times_calc):  # fitness
        self.user_profile_onehot = user_profile_onehot
        self.tempfinal = tempfinal
        self.times_calc = times_calc
        pool = mp.Pool(processes=tempfinal.shape[0])
        redeem_loss_value = pool.map(
            self.multiprocess_fitness_redeem, range(tempfinal.shape[0]))
        pool.close()
        pool.join()
        return np.squeeze(redeem_loss_value)

    def fitness_predict_redeem(self, length_best, tempfinal, user_profile_onehot, user_balance):
        multiple_time = np.hstack((tempfinal[0, 1], np.tile(
            tempfinal[0, 2], 7), np.tile(tempfinal[0, 3], 12)))  # concatenate the weight multipliers
        for k in range(4, tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, tempfinal[0, k]))
        user_profile_onehot = user_profile_onehot * multiple_time  # scale up the weights of selected feature blocks
        model_kmean = k_mean(user_profile_onehot)  # clustering
        divide_class = model_kmean.k_mean_divide(int(tempfinal[0, 0]))
        redeem_predict_class = []
        for i in range(len(divide_class)):  # feed each cluster into the network separately
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])
            user_balance.CalculateDayRedeemList(divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataAll()
            # LSTM_double
            redeem_model = LSTM_double(redeem_train.reshape((-1, 1)))
            redeem_model.lr = 0.0001
            redeem_model.train_lstm(num_epochs=10, numb_sub=length_best, numb_class=i+1, continue_train=True, class_people='redeem')
            redeem_predict = redeem_model.prediction(numb_sub=length_best, numb_class=i+1, class_people='redeem')
            tf.reset_default_graph()
            redeem_predict_class.append(redeem_predict)
        redeem_predict_return = np.array(redeem_predict_class).sum(axis=0)
        return redeem_predict_return

    def calfitValue(self, value):  # clamp fitness values to be non-negative (probably not strictly necessary)
        for i in range(value.shape[0]):
            if value[i] < 0:
                value[i] = 0
        return value

    def selection(self, pop, value):  # selection
        newfitvalue = np.zeros((value.shape[0], 1))
        totalValue = sum(value)
        accumalator = 0
        j = 0
        for i in value:  # roulette-wheel sampling
            newValue = (i*1.0/totalValue)
            accumalator += newValue
            newfitvalue[j] = (accumalator)
            j = j+1
        newfitvalue[j-1] = 1
        ms = []
        for i in range(value.shape[0]):
            ms.append(random.random())
        ms.sort()
        fitin = 0
        newin = 0
        newpop = pop
        while newin < value.shape[0]:
            if(ms[newin] < newfitvalue[fitin]):
                newpop[newin] = pop[fitin]
                newin = newin+1
            else:
                fitin = fitin+1
        return newpop

    def crossover(self, pop, crossrate, chromLength):  # crossover
        row = pop.shape[0]-1  # make sure pairs of chromosomes are available for pairwise crossover
        pop = pop.tolist()
        for i in range(0, row, 2):
            if(random.random() < crossrate):
                # pick a crossover point within the gene blocks
                singpoint = random.randint(chromLength)
                temp1 = []
                temp2 = []
                temp1.extend(pop[i][0:singpoint])
                temp1.extend(pop[i + 1][singpoint:chromLength])
                temp2.extend(pop[i + 1][0:singpoint])
                temp2.extend(pop[i][singpoint:chromLength])
                pop[i] = temp1  # build the new offspring
                pop[i + 1] = temp2
        pop = np.array(pop)
        return pop

    def mutation(self, pop, mutationrate, chromLength):  # mutation
        row = pop.shape[0]
        for i in range(row):
            if (random.random() < mutationrate):
                mpoint = random.randint(0, chromLength)  # mutation position
                if(pop[i, mpoint] == 1):
                    pop[i, mpoint] = 0
                else:
                    pop[i, mpoint] = 1
        return pop

    def best(self, pop, value, chromLength):
        bestvalue = value.max()
        find_best = np.argmax(value)
        temp = pop[find_best, :].reshape((-1, chromLength))
        return temp, bestvalue, find_best+1
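
# A minimal, hypothetical driver sketch for the `genetic` helpers above.
# The chromosome layout (one 3-bit gene for the cluster count plus four
# 8-bit weight genes) and the toy fitness function are illustrative
# assumptions, not the project's actual configuration.
if __name__ == '__main__':
    ga = genetic()
    chromLength_type = [0, 3, 8, 8, 8, 8]  # assumed bits per gene block
    chromLength = sum(chromLength_type)    # 35 bits per chromosome
    pop = ga.getEncoding(popSize=10, chromLength=chromLength)
    for generation in range(20):
        decoded = ga.binary2decimal(pop, chromLength_type, chromLength)
        # toy fitness: prefer small weight multipliers (stand-in for the LSTM loss)
        value = ga.calfitValue(1.0 / (1.0 + decoded[:, 1:].sum(axis=1)))
        best_gene, best_value, best_index = ga.best(pop, value, chromLength)
        pop = ga.selection(pop, value)
        pop = ga.crossover(pop, crossrate=0.7, chromLength=chromLength)
        pop = ga.mutation(pop, mutationrate=0.05, chromLength=chromLength)
    print(best_gene, best_value, best_index)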
the-stack_0_8157
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Border(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "pointcloud.marker" _path_str = "pointcloud.marker.border" _valid_props = {"arearatio", "color"} # arearatio # --------- @property def arearatio(self): """ Specifies what fraction of the marker area is covered with the border. The 'arearatio' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["arearatio"] @arearatio.setter def arearatio(self, val): self["arearatio"] = val # color # ----- @property def color(self): """ Sets the stroke color. It accepts a specific color. If the color is not fully opaque and there are hundreds of thousands of points, it may cause slower zooming and panning. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ arearatio Specifies what fraction of the marker area is covered with the border. color Sets the stroke color. It accepts a specific color. If the color is not fully opaque and there are hundreds of thousands of points, it may cause slower zooming and panning. """ def __init__(self, arg=None, arearatio=None, color=None, **kwargs): """ Construct a new Border object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.pointcloud.marker.Border` arearatio Specifies what fraction of the marker area is covered with the border. 
color Sets the stroke color. It accepts a specific color. If the color is not fully opaque and there are hundreds of thousands of points, it may cause slower zooming and panning. Returns ------- Border """ super(Border, self).__init__("border") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.pointcloud.marker.Border constructor must be a dict or an instance of :class:`plotly.graph_objs.pointcloud.marker.Border`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("arearatio", None) _v = arearatio if arearatio is not None else _v if _v is not None: self["arearatio"] = _v _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
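
# A minimal usage sketch: constructing the Border object defined above with
# its two documented properties. The property values are arbitrary illustrations.
if __name__ == "__main__":
    example_border = Border(arearatio=0.5, color="rgb(80, 80, 80)")
    print(example_border)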
the-stack_0_8158
from array import array from functools import partial import traceback import importlib from enum import Enum import dask from dask.base import normalize_token import msgpack from . import pickle from ..utils import has_keyword, typename, ensure_bytes from .compression import maybe_compress, decompress from .utils import ( unpack_frames, pack_frames_prelude, frame_split_size, msgpack_opts, ) lazy_registrations = {} dask_serialize = dask.utils.Dispatch("dask_serialize") dask_deserialize = dask.utils.Dispatch("dask_deserialize") _cached_allowed_modules = {} def dask_dumps(x, context=None): """Serialize object using the class-based registry""" type_name = typename(type(x)) try: dumps = dask_serialize.dispatch(type(x)) except TypeError: raise NotImplementedError(type_name) if has_keyword(dumps, "context"): header, frames = dumps(x, context=context) else: header, frames = dumps(x) header["type"] = type_name header["type-serialized"] = pickle.dumps(type(x), protocol=4) header["serializer"] = "dask" return header, frames def dask_loads(header, frames): typ = pickle.loads(header["type-serialized"]) loads = dask_deserialize.dispatch(typ) return loads(header, frames) def pickle_dumps(x, context=None): frames = [None] buffer_callback = lambda f: frames.append(memoryview(f)) frames[0] = pickle.dumps( x, buffer_callback=buffer_callback, protocol=context.get("pickle-protocol", None) if context else None, ) header = { "serializer": "pickle", "writeable": tuple(not f.readonly for f in frames[1:]), } return header, frames def pickle_loads(header, frames): x, buffers = frames[0], frames[1:] writeable = header["writeable"] for i in range(len(buffers)): mv = memoryview(buffers[i]) if writeable[i] == mv.readonly: if mv.readonly: buffers[i] = memoryview(bytearray(mv)).cast(mv.format, mv.shape) else: buffers[i] = memoryview(bytes(mv)).cast(mv.format, mv.shape) return pickle.loads(x, buffers=buffers) def import_allowed_module(name): if name in _cached_allowed_modules: return _cached_allowed_modules[name] # Check for non-ASCII characters name = name.encode("ascii").decode() # We only compare the root module root = name.split(".", 1)[0] # Note, if an empty string creeps into allowed-imports it is disallowed explicitly if root and root in dask.config.get("distributed.scheduler.allowed-imports"): _cached_allowed_modules[name] = importlib.import_module(name) return _cached_allowed_modules[name] else: raise RuntimeError( f"Importing {repr(name)} is not allowed, please add it to the list of " "allowed modules the scheduler can import via the " "distributed.scheduler.allowed-imports configuration setting." ) def msgpack_decode_default(obj): """ Custom packer/unpacker for msgpack """ if "__Enum__" in obj: mod = import_allowed_module(obj["__module__"]) typ = getattr(mod, obj["__name__"]) return getattr(typ, obj["name"]) if "__Set__" in obj: return set(obj["as-list"]) if "__Serialized__" in obj: # Notice, the data here is marked a Serialized rather than deserialized. This # is because deserialization requires Pickle which the Scheduler cannot run # because of security reasons. # By marking it Serialized, the data is passed through to the workers that # eventually will deserialize it. 
return Serialized(*obj["data"]) return obj def msgpack_encode_default(obj): """ Custom packer/unpacker for msgpack """ if isinstance(obj, Serialize): return {"__Serialized__": True, "data": serialize(obj.data)} if isinstance(obj, Enum): return { "__Enum__": True, "name": obj.name, "__module__": obj.__module__, "__name__": type(obj).__name__, } if isinstance(obj, set): return {"__Set__": True, "as-list": list(obj)} return obj def msgpack_dumps(x): try: frame = msgpack.dumps(x, use_bin_type=True) except Exception: raise NotImplementedError() else: return {"serializer": "msgpack"}, [frame] def msgpack_loads(header, frames): return msgpack.loads(b"".join(frames), use_list=False, **msgpack_opts) def serialization_error_loads(header, frames): msg = "\n".join([ensure_bytes(frame).decode("utf8") for frame in frames]) raise TypeError(msg) families = {} def register_serialization_family(name, dumps, loads): families[name] = (dumps, loads, dumps and has_keyword(dumps, "context")) register_serialization_family("dask", dask_dumps, dask_loads) register_serialization_family("pickle", pickle_dumps, pickle_loads) register_serialization_family("msgpack", msgpack_dumps, msgpack_loads) register_serialization_family("error", None, serialization_error_loads) def check_dask_serializable(x): if type(x) in (list, set, tuple) and len(x): return check_dask_serializable(next(iter(x))) elif type(x) is dict and len(x): return check_dask_serializable(next(iter(x.items()))[1]) else: try: dask_serialize.dispatch(type(x)) return True except TypeError: pass return False def serialize(x, serializers=None, on_error="message", context=None): r""" Convert object to a header and list of bytestrings This takes in an arbitrary Python object and returns a msgpack serializable header and a list of bytes or memoryview objects. The serialization protocols to use are configurable: a list of names define the set of serializers to use, in order. These names are keys in the ``serializer_registry`` dict (e.g., 'pickle', 'msgpack'), which maps to the de/serialize functions. The name 'dask' is special, and will use the per-class serialization methods. ``None`` gives the default list ``['dask', 'pickle']``. Examples -------- >>> serialize(1) ({}, [b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.']) >>> serialize(b'123') # some special types get custom treatment ({'type': 'builtins.bytes'}, [b'123']) >>> deserialize(*serialize(1)) 1 Returns ------- header: dictionary containing any msgpack-serializable metadata frames: list of bytes or memoryviews, commonly of length one See Also -------- deserialize : Convert header and frames back to object to_serialize : Mark that data in a message should be serialized register_serialization : Register custom serialization functions """ if serializers is None: serializers = ("dask", "pickle") # TODO: get from configuration if isinstance(x, Serialized): return x.header, x.frames if type(x) in (list, set, tuple, dict): iterate_collection = False if type(x) is list and "msgpack" in serializers: # Note: "msgpack" will always convert lists to tuples # (see GitHub #3716), so we should iterate # through the list if "msgpack" comes before "pickle" # in the list of serializers. 
iterate_collection = ("pickle" not in serializers) or ( serializers.index("pickle") > serializers.index("msgpack") ) if not iterate_collection: # Check for "dask"-serializable data in dict/list/set iterate_collection = check_dask_serializable(x) # Determine whether keys are safe to be serialized with msgpack if type(x) is dict and iterate_collection: try: msgpack.dumps(list(x.keys())) except Exception: dict_safe = False else: dict_safe = True if ( type(x) in (list, set, tuple) and iterate_collection or type(x) is dict and iterate_collection and dict_safe ): if isinstance(x, dict): headers_frames = [] for k, v in x.items(): _header, _frames = serialize( v, serializers=serializers, on_error=on_error, context=context ) _header["key"] = k headers_frames.append((_header, _frames)) else: headers_frames = [ serialize( obj, serializers=serializers, on_error=on_error, context=context ) for obj in x ] frames = [] lengths = [] compressions = [] for _header, _frames in headers_frames: frames.extend(_frames) length = len(_frames) lengths.append(length) compressions.extend(_header.get("compression") or [None] * len(_frames)) headers = [obj[0] for obj in headers_frames] headers = { "sub-headers": headers, "is-collection": True, "frame-lengths": lengths, "type-serialized": type(x).__name__, } if any(compression is not None for compression in compressions): headers["compression"] = compressions return headers, frames tb = "" for name in serializers: dumps, loads, wants_context = families[name] try: header, frames = dumps(x, context=context) if wants_context else dumps(x) header["serializer"] = name return header, frames except NotImplementedError: continue except Exception as e: tb = traceback.format_exc() break msg = "Could not serialize object of type %s." % type(x).__name__ if on_error == "message": frames = [msg] if tb: frames.append(tb[:100000]) frames = [frame.encode() for frame in frames] return {"serializer": "error"}, frames elif on_error == "raise": raise TypeError(msg, str(x)[:10000]) def deserialize(header, frames, deserializers=None): """ Convert serialized header and list of bytestrings back to a Python object Parameters ---------- header : dict frames : list of bytes deserializers : Optional[Dict[str, Tuple[Callable, Callable, bool]]] An optional dict mapping a name to a (de)serializer. See `dask_serialize` and `dask_deserialize` for more. 
See Also -------- serialize """ if "is-collection" in header: headers = header["sub-headers"] lengths = header["frame-lengths"] cls = {"tuple": tuple, "list": list, "set": set, "dict": dict}[ header["type-serialized"] ] start = 0 if cls is dict: d = {} for _header, _length in zip(headers, lengths): k = _header.pop("key") d[k] = deserialize( _header, frames[start : start + _length], deserializers=deserializers, ) start += _length return d else: lst = [] for _header, _length in zip(headers, lengths): lst.append( deserialize( _header, frames[start : start + _length], deserializers=deserializers, ) ) start += _length return cls(lst) name = header.get("serializer") if deserializers is not None and name not in deserializers: raise TypeError( "Data serialized with %s but only able to deserialize " "data with %s" % (name, str(list(deserializers))) ) dumps, loads, wants_context = families[name] return loads(header, frames) def serialize_and_split(x, serializers=None, on_error="message", context=None): """Serialize and split compressable frames This function is a drop-in replacement of `serialize()` that calls `serialize()` followed by `frame_split_size()` on frames that should be compressed. Use `merge_and_deserialize()` to merge and deserialize the frames back. See Also -------- serialize merge_and_deserialize """ header, frames = serialize(x, serializers, on_error, context) num_sub_frames = [] offsets = [] out_frames = [] out_compression = [] for frame, compression in zip( frames, header.get("compression") or [None] * len(frames) ): if compression is None: # default behavior sub_frames = frame_split_size(frame) num_sub_frames.append(len(sub_frames)) offsets.append(len(out_frames)) out_frames.extend(sub_frames) out_compression.extend([None] * len(sub_frames)) else: num_sub_frames.append(1) offsets.append(len(out_frames)) out_frames.append(frame) out_compression.append(compression) assert len(out_compression) == len(out_frames) # Notice, in order to match msgpack's implicit convertion to tuples, # we convert to tuples here as well. 
header["split-num-sub-frames"] = tuple(num_sub_frames) header["split-offsets"] = tuple(offsets) header["compression"] = tuple(out_compression) return header, out_frames def merge_and_deserialize(header, frames, deserializers=None): """Merge and deserialize frames This function is a drop-in replacement of `deserialize()` that merges frames that were split by `serialize_and_split()` See Also -------- deserialize serialize_and_split """ merged_frames = [] if "split-num-sub-frames" not in header: merged_frames = frames else: for n, offset in zip(header["split-num-sub-frames"], header["split-offsets"]): if n == 1: merged_frames.append(frames[offset]) else: merged_frames.append(bytearray().join(frames[offset : offset + n])) return deserialize(header, merged_frames, deserializers=deserializers) class Serialize: """Mark an object that should be serialized Examples -------- >>> msg = {'op': 'update', 'data': to_serialize(123)} >>> msg # doctest: +SKIP {'op': 'update', 'data': <Serialize: 123>} See also -------- distributed.protocol.dumps """ def __init__(self, data): self.data = data def __repr__(self): return "<Serialize: %s>" % str(self.data) def __eq__(self, other): return isinstance(other, Serialize) and other.data == self.data def __ne__(self, other): return not (self == other) def __hash__(self): return hash(self.data) to_serialize = Serialize class Serialized: """ An object that is already serialized into header and frames Normal serialization operations pass these objects through. This is typically used within the scheduler which accepts messages that contain data without actually unpacking that data. """ def __init__(self, header, frames): self.header = header self.frames = frames def __eq__(self, other): return ( isinstance(other, Serialized) and other.header == self.header and other.frames == self.frames ) def __ne__(self, other): return not (self == other) def extract_serialize(x) -> tuple: """Pull out Serialize objects from message This also remove large bytestrings from the message into a second dictionary. Examples -------- >>> from distributed.protocol import to_serialize >>> msg = {'op': 'update', 'data': to_serialize(123)} >>> extract_serialize(msg) ({'op': 'update'}, {('data',): <Serialize: 123>}, set()) """ typ_x: type = type(x) if typ_x is dict: x_d: dict = x x_items = x_d.items() x2 = {} elif typ_x is list: x_l: list = x x_items = enumerate(x_l) x2 = len(x_l) * [None] ser = {} bytestrings = set() path = () _extract_serialize(x_items, x2, ser, bytestrings, path) return x2, ser, bytestrings def _extract_serialize(x_items, x2, ser: dict, bytestrings: set, path: tuple) -> None: for k, v in x_items: path_k = path + (k,) typ_v: type = type(v) if typ_v is dict: v_d: dict = v v_items = v_d.items() x2[k] = v2 = {} _extract_serialize(v_items, v2, ser, bytestrings, path_k) elif typ_v is list: v_l: list = v v_items = enumerate(v_l) x2[k] = v2 = len(v_l) * [None] _extract_serialize(v_items, v2, ser, bytestrings, path_k) elif typ_v is Serialize or typ_v is Serialized: ser[path_k] = v elif typ_v is bytes: v_b: bytes = v if len(v_b) > 2 ** 16: ser[path_k] = to_serialize(v_b) bytestrings.add(path_k) else: x2[k] = v_b elif typ_v is bytearray: v_ba: bytearray = v if len(v_ba) > 2 ** 16: ser[path_k] = to_serialize(v_ba) bytestrings.add(path_k) else: x2[k] = v_ba else: x2[k] = v def nested_deserialize(x): """ Replace all Serialize and Serialized values nested in *x* with the original values. Returns a copy of *x*. 
>>> msg = {'op': 'update', 'data': to_serialize(123)} >>> nested_deserialize(msg) {'op': 'update', 'data': 123} """ def replace_inner(x): if type(x) is dict: x = x.copy() for k, v in x.items(): typ = type(v) if typ is dict or typ is list: x[k] = replace_inner(v) elif typ is Serialize: x[k] = v.data elif typ is Serialized: x[k] = deserialize(v.header, v.frames) elif type(x) is list: x = list(x) for k, v in enumerate(x): typ = type(v) if typ is dict or typ is list: x[k] = replace_inner(v) elif typ is Serialize: x[k] = v.data elif typ is Serialized: x[k] = deserialize(v.header, v.frames) return x return replace_inner(x) def serialize_bytelist(x, **kwargs): header, frames = serialize_and_split(x, **kwargs) if frames: compression, frames = zip(*map(maybe_compress, frames)) else: compression = [] header["compression"] = compression header["count"] = len(frames) header = msgpack.dumps(header, use_bin_type=True) frames2 = [header, *frames] frames2.insert(0, pack_frames_prelude(frames2)) return frames2 def serialize_bytes(x, **kwargs): L = serialize_bytelist(x, **kwargs) return b"".join(L) def deserialize_bytes(b): frames = unpack_frames(b) header, frames = frames[0], frames[1:] if header: header = msgpack.loads(header, raw=False, use_list=False) else: header = {} frames = decompress(header, frames) return merge_and_deserialize(header, frames) ################################ # Class specific serialization # ################################ def register_serialization(cls, serialize, deserialize): """Register a new class for dask-custom serialization Parameters ---------- cls : type serialize : callable(cls) -> Tuple[Dict, List[bytes]] deserialize : callable(header: Dict, frames: List[bytes]) -> cls Examples -------- >>> class Human: ... def __init__(self, name): ... self.name = name >>> def serialize(human): ... header = {} ... frames = [human.name.encode()] ... return header, frames >>> def deserialize(header, frames): ... return Human(frames[0].decode()) >>> register_serialization(Human, serialize, deserialize) >>> serialize(Human('Alice')) ({}, [b'Alice']) See Also -------- serialize deserialize """ if isinstance(cls, str): raise TypeError( "Strings are no longer accepted for type registration. " "Use dask_serialize.register_lazy instead" ) dask_serialize.register(cls)(serialize) dask_deserialize.register(cls)(deserialize) def register_serialization_lazy(toplevel, func): """Register a registration function to be called if *toplevel* module is ever loaded. """ raise Exception("Serialization registration has changed. 
See documentation") @partial(normalize_token.register, Serialized) def normalize_Serialized(o): return [o.header] + o.frames # for dask.base.tokenize # Teach serialize how to handle bytes @dask_serialize.register(bytes) def _serialize_bytes(obj): header = {} # no special metadata frames = [obj] return header, frames # Teach serialize how to handle bytestrings @dask_serialize.register(bytearray) def _serialize_bytearray(obj): header = {} # no special metadata frames = [obj] return header, frames @dask_deserialize.register(bytes) def _deserialize_bytes(header, frames): if len(frames) == 1 and isinstance(frames[0], bytes): return frames[0] else: return bytes().join(frames) @dask_deserialize.register(bytearray) def _deserialize_bytearray(header, frames): if len(frames) == 1 and isinstance(frames[0], bytearray): return frames[0] else: return bytearray().join(frames) @dask_serialize.register(array) def _serialize_array(obj): header = {"typecode": obj.typecode, "writeable": (None,)} frames = [memoryview(obj)] return header, frames @dask_deserialize.register(array) def _deserialize_array(header, frames): a = array(header["typecode"]) for f in map(memoryview, frames): try: f = f.cast("B") except TypeError: f = f.tobytes() a.frombytes(f) return a @dask_serialize.register(memoryview) def _serialize_memoryview(obj): if obj.format == "O": raise ValueError("Cannot serialize `memoryview` containing Python objects") header = {"format": obj.format, "shape": obj.shape} frames = [obj] return header, frames @dask_deserialize.register(memoryview) def _deserialize_memoryview(header, frames): if len(frames) == 1: out = memoryview(frames[0]).cast("B") else: out = memoryview(b"".join(frames)) out = out.cast(header["format"], header["shape"]) return out ######################### # Descend into __dict__ # ######################### def _is_msgpack_serializable(v): typ = type(v) return ( v is None or typ is str or typ is bool or typ is int or typ is float or isinstance(v, dict) and all(map(_is_msgpack_serializable, v.values())) and all(typ is str for x in v.keys()) or isinstance(v, (list, tuple)) and all(map(_is_msgpack_serializable, v)) ) class ObjectDictSerializer: def __init__(self, serializer): self.serializer = serializer def serialize(self, est): header = { "serializer": self.serializer, "type-serialized": pickle.dumps(type(est), protocol=4), "simple": {}, "complex": {}, } frames = [] if isinstance(est, dict): d = est else: d = est.__dict__ for k, v in d.items(): if _is_msgpack_serializable(v): header["simple"][k] = v else: if isinstance(v, dict): h, f = self.serialize(v) else: h, f = serialize(v, serializers=(self.serializer, "pickle")) header["complex"][k] = { "header": h, "start": len(frames), "stop": len(frames) + len(f), } frames += f return header, frames def deserialize(self, header, frames): cls = pickle.loads(header["type-serialized"]) if issubclass(cls, dict): dd = obj = {} else: obj = object.__new__(cls) dd = obj.__dict__ dd.update(header["simple"]) for k, d in header["complex"].items(): h = d["header"] f = frames[d["start"] : d["stop"]] v = deserialize(h, f) dd[k] = v return obj dask_object_with_dict_serializer = ObjectDictSerializer("dask") dask_deserialize.register(dict)(dask_object_with_dict_serializer.deserialize) def register_generic( cls, serializer_name="dask", serialize_func=dask_serialize, deserialize_func=dask_deserialize, ): """Register (de)serialize to traverse through __dict__ Normally when registering new classes for Dask's custom serialization you need to manage headers and frames, 
    which can be tedious. If all you want to do is traverse
    through your object and apply serialize to all of your object's
    attributes then this function may provide an easier path.

    This registers a class for the custom Dask serialization family. It
    serializes it by traversing through its __dict__ of attributes and
    applying ``serialize`` and ``deserialize`` recursively. It collects a
    set of frames and keeps small attributes in the header. Deserialization
    reverses this process.

    This is a good idea if the following hold:

    1. Most of the bytes of your object are composed of data types that
       Dask's custom serialization already handles well, like Numpy arrays.
    2. Your object doesn't require any special constructor logic, other than
       object.__new__(cls)

    Examples
    --------
    >>> import sklearn.base
    >>> from distributed.protocol import register_generic
    >>> register_generic(sklearn.base.BaseEstimator)

    See Also
    --------
    dask_serialize
    dask_deserialize
    """
    object_with_dict_serializer = ObjectDictSerializer(serializer_name)
    serialize_func.register(cls)(object_with_dict_serializer.serialize)
    deserialize_func.register(cls)(object_with_dict_serializer.deserialize)
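
# A minimal round-trip sketch through the family-based serialize/deserialize
# pair defined above; the sample payload is an arbitrary illustration.
if __name__ == "__main__":
    example_header, example_frames = serialize({"x": b"0" * 100000, "y": 1})
    assert deserialize(example_header, example_frames) == {"x": b"0" * 100000, "y": 1}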
the-stack_0_8160
list_ = input()
# list_ = "день победы 1945 года 9 мая"  # sample input ("Victory Day 1945, May 9")
list_01 = list_.split(' ')
num_ = []
for i in list_01:
    if i.isdigit():  # the condition only needs to be truthy, no explicit == True required
        # print(list_01)
        num_.append(int(i))
# print(num_)
num_.sort()  # no need to build a new list: sort() works in place (as noted in the theory part)
print(num_)
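
# The same result, shown for comparison, via sorted() and a generator
# expression that reuses list_01 from above.
print(sorted(int(word) for word in list_01 if word.isdigit()))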
the-stack_0_8162
import inspect import os import shutil import subprocess import stat import sys import tarfile import time import zipfile def install_requirements(what): old_path = sys.path[:] w = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.dirname(os.path.dirname(w))) try: from setup import EXTRAS_REQUIRE, read finally: sys.path = old_path requirements = ['mock>=2.0.0', 'flake8', 'pytest', 'pytest-cov'] if what == 'all' else ['behave'] requirements += ['psycopg2-binary', 'coverage'] for r in read('requirements.txt').split('\n'): r = r.strip() if r != '': extras = {e for e, v in EXTRAS_REQUIRE.items() if v and r.startswith(v[0])} if not extras or what == 'all' or what in extras: requirements.append(r) subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip']) r = subprocess.call([sys.executable, '-m', 'pip', 'install'] + requirements) s = subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'setuptools']) return s | r def install_packages(what): packages = { 'zookeeper': ['zookeeper', 'zookeeper-bin', 'zookeeperd'], 'consul': ['consul'], } packages['exhibitor'] = packages['zookeeper'] packages = packages.get(what, []) ver = str({'etcd': '9.6', 'etcd3': '13', 'consul': 12, 'exhibitor': 11, 'kubernetes': 13, 'raft': 12}.get(what)) subprocess.call(['sudo', 'apt-get', 'update', '-y']) return subprocess.call(['sudo', 'apt-get', 'install', '-y', 'postgresql-' + ver, 'expect-dev', 'wget'] + packages) def get_file(url, name): try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve print('Downloading ' + url) urlretrieve(url, name) def untar(archive, name): with tarfile.open(archive) as tar: f = tar.extractfile(name) dest = os.path.basename(name) with open(dest, 'wb') as d: shutil.copyfileobj(f, d) return dest def unzip(archive, name): with zipfile.ZipFile(archive, 'r') as z: name = z.extract(name) dest = os.path.basename(name) shutil.move(name, dest) return dest def unzip_all(archive): print('Extracting ' + archive) with zipfile.ZipFile(archive, 'r') as z: z.extractall() def chmod_755(name): os.chmod(name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) def unpack(archive, name): print('Extracting {0} from {1}'.format(name, archive)) func = unzip if archive.endswith('.zip') else untar name = func(archive, name) chmod_755(name) return name def install_etcd(): version = os.environ.get('ETCDVERSION', '3.3.13') platform = {'linux2': 'linux', 'win32': 'windows', 'cygwin': 'windows'}.get(sys.platform, sys.platform) dirname = 'etcd-v{0}-{1}-amd64'.format(version, platform) ext = 'tar.gz' if platform == 'linux' else 'zip' name = '{0}.{1}'.format(dirname, ext) url = 'https://github.com/etcd-io/etcd/releases/download/v{0}/{1}'.format(version, name) get_file(url, name) ext = '.exe' if platform == 'windows' else '' return int(unpack(name, '{0}/etcd{1}'.format(dirname, ext)) is None) def install_postgres(): version = os.environ.get('PGVERSION', '12.1-1') platform = {'darwin': 'osx', 'win32': 'windows-x64', 'cygwin': 'windows-x64'}[sys.platform] name = 'postgresql-{0}-{1}-binaries.zip'.format(version, platform) get_file('http://get.enterprisedb.com/postgresql/' + name, name) unzip_all(name) bin_dir = os.path.join('pgsql', 'bin') for f in os.listdir(bin_dir): chmod_755(os.path.join(bin_dir, f)) subprocess.call(['pgsql/bin/postgres', '-V']) return 0 def setup_kubernetes(): 
get_file('https://storage.googleapis.com/minikube/k8sReleases/v1.7.0/localkube-linux-amd64', 'localkube') chmod_755('localkube') devnull = open(os.devnull, 'w') subprocess.Popen(['sudo', 'nohup', './localkube', '--logtostderr=true', '--enable-dns=false'], stdout=devnull, stderr=devnull) for _ in range(0, 120): if subprocess.call(['wget', '-qO', '-', 'http://127.0.0.1:8080/'], stdout=devnull, stderr=devnull) == 0: break time.sleep(1) else: print('localkube did not start') return 1 subprocess.call('sudo chmod 644 /var/lib/localkube/certs/*', shell=True) print('Set up .kube/config') kube = os.path.join(os.path.expanduser('~'), '.kube') os.makedirs(kube) with open(os.path.join(kube, 'config'), 'w') as f: f.write("""apiVersion: v1 clusters: - cluster: certificate-authority: /var/lib/localkube/certs/ca.crt server: https://127.0.0.1:8443 name: local contexts: - context: cluster: local user: myself name: local current-context: local kind: Config preferences: {} users: - name: myself user: client-certificate: /var/lib/localkube/certs/apiserver.crt client-key: /var/lib/localkube/certs/apiserver.key """) return 0 def main(): what = os.environ.get('DCS', sys.argv[1] if len(sys.argv) > 1 else 'all') if what != 'all': if sys.platform.startswith('linux'): r = install_packages(what) if r == 0 and what == 'kubernetes': r = setup_kubernetes() else: r = install_postgres() if r == 0 and what.startswith('etcd'): r = install_etcd() if r != 0: return r return install_requirements(what) if __name__ == '__main__': sys.exit(main())
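
# A hypothetical invocation sketch: the script takes the target store from the
# DCS environment variable or from the first positional argument, for example
# (the script file name below is a placeholder for this file's real path):
#   DCS=etcd python install_dependencies.py
#   python install_dependencies.py kubernetes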
the-stack_0_8163
import tempfile from pathlib import Path import argparse import shutil import os import glob import cv2 import cog from run import run_cmd from datetime import datetime class Predictor(cog.Predictor): def setup(self): parser = argparse.ArgumentParser() parser.add_argument( "--input_folder", type=str, default="input/cog_temp"+ str(datetime.utcnow().timestamp()), help="Test images" ) parser.add_argument( "--output_folder", type=str, default="output"+ str(datetime.utcnow().timestamp()), help="Restored images, please use the absolute path", ) parser.add_argument("--GPU", type=str, default="0", help="0,1,2") parser.add_argument( "--checkpoint_name", type=str, default="Setting_9_epoch_100", help="choose which checkpoint", ) self.opts = parser.parse_args("") self.basepath = os.getcwd() self.opts.input_folder = os.path.join(self.basepath, self.opts.input_folder) self.opts.output_folder = os.path.join(self.basepath, self.opts.output_folder) os.makedirs(self.opts.input_folder, exist_ok=True) os.makedirs(self.opts.output_folder, exist_ok=True) @cog.input("image", type=Path, help="input image") @cog.input( "HR", type=bool, default=False, help="whether the input image is high-resolution", ) @cog.input( "with_scratch", type=bool, default=False, help="whether the input image is scratched", ) def predict(self, image, HR=False, with_scratch=False): try: os.chdir(self.basepath) input_path = os.path.join(self.opts.input_folder, os.path.basename(image)) shutil.copy(str(image), input_path) gpu1 = self.opts.GPU ## Stage 1: Overall Quality Improve print("Running Stage 1: Overall restoration") os.chdir("./Global") stage_1_input_dir = self.opts.input_folder stage_1_output_dir = os.path.join( self.opts.output_folder, "stage_1_restore_output" ) os.makedirs(stage_1_output_dir, exist_ok=True) if not with_scratch: stage_1_command = ( "python test.py --test_mode Full --Quality_restore --test_input " + stage_1_input_dir + " --outputs_dir " + stage_1_output_dir + " --gpu_ids " + gpu1 ) run_cmd(stage_1_command) else: mask_dir = os.path.join(stage_1_output_dir, "masks") new_input = os.path.join(mask_dir, "input") new_mask = os.path.join(mask_dir, "mask") stage_1_command_1 = ( "python detection.py --test_path " + stage_1_input_dir + " --output_dir " + mask_dir + " --input_size full_size" + " --GPU " + gpu1 ) if HR: HR_suffix = " --HR" else: HR_suffix = "" stage_1_command_2 = ( "python test.py --Scratch_and_Quality_restore --test_input " + new_input + " --test_mask " + new_mask + " --outputs_dir " + stage_1_output_dir + " --gpu_ids " + gpu1 + HR_suffix ) run_cmd(stage_1_command_1) run_cmd(stage_1_command_2) ## Solve the case when there is no face in the old photo stage_1_results = os.path.join(stage_1_output_dir, "restored_image") stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output") os.makedirs(stage_4_output_dir, exist_ok=True) for x in os.listdir(stage_1_results): img_dir = os.path.join(stage_1_results, x) shutil.copy(img_dir, stage_4_output_dir) print("Finish Stage 1 ...") print("\n") ## Stage 2: Face Detection print("Running Stage 2: Face Detection") os.chdir(".././Face_Detection") stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image") stage_2_output_dir = os.path.join( self.opts.output_folder, "stage_2_detection_output" ) os.makedirs(stage_2_output_dir, exist_ok=True) stage_2_command = ( "python detect_all_dlib_HR.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir ) run_cmd(stage_2_command) print("Finish Stage 2 ...") print("\n") ## Stage 3: Face Restore 
print("Running Stage 3: Face Enhancement") os.chdir(".././Face_Enhancement") stage_3_input_mask = "./" stage_3_input_face = stage_2_output_dir stage_3_output_dir = os.path.join( self.opts.output_folder, "stage_3_face_output" ) os.makedirs(stage_3_output_dir, exist_ok=True) self.opts.checkpoint_name = "FaceSR_512" stage_3_command = ( "python test_face.py --old_face_folder " + stage_3_input_face + " --old_face_label_folder " + stage_3_input_mask + " --tensorboard_log --name " + self.opts.checkpoint_name + " --gpu_ids " + gpu1 + " --load_size 512 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 1 --results_dir " + stage_3_output_dir + " --no_parsing_map" ) run_cmd(stage_3_command) print("Finish Stage 3 ...") print("\n") ## Stage 4: Warp back print("Running Stage 4: Blending") os.chdir(".././Face_Detection") stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image") stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img") stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output") os.makedirs(stage_4_output_dir, exist_ok=True) stage_4_command = ( "python align_warp_back_multiple_dlib_HR.py --origin_url " + stage_4_input_image_dir + " --replace_url " + stage_4_input_face_dir + " --save_url " + stage_4_output_dir ) run_cmd(stage_4_command) print("Finish Stage 4 ...") print("\n") print("All the processing is done. Please check the results.") final_output = os.listdir(os.path.join(self.opts.output_folder, "final_output"))[0] image_restore = cv2.imread(os.path.join(self.opts.output_folder, "final_output", final_output)) out_path = Path(tempfile.mkdtemp()) / "out.png" cv2.imwrite(str(out_path), image_restore) finally: clean_folder(self.opts.input_folder) clean_folder(self.opts.output_folder) return out_path def clean_folder(folder): for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(f"Failed to delete {file_path}. Reason:{e}")
the-stack_0_8164
#!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under MIT License. # """The implementation of the Sheets.""" from typing import Any, Dict, Iterator, MutableMapping from tensorbay.dataset import Notes, RemoteData from tensorbay.label import Catalog from tensorbay.utility import URL from graviti.client import get_catalog, get_notes, list_data_details, list_segments from graviti.dataframe import DataFrame from graviti.portex import Extractors, catalog_to_schema, get_extractors from graviti.utility import LazyFactory, LazyList, NestedDict LazyLists = NestedDict[str, LazyList[Any]] class Sheets(MutableMapping[str, DataFrame]): """The basic structure of the Graviti sheets.""" _data: Dict[str, DataFrame] _dataset_id: str access_key: str url: str commit_id: str def __len__(self) -> int: return self._get_data().__len__() def __getitem__(self, key: str) -> DataFrame: return self._get_data().__getitem__(key) def __setitem__(self, key: str, value: DataFrame) -> None: self._get_data().__setitem__(key, value) def __delitem__(self, key: str) -> None: self._get_data().__delitem__(key) def __iter__(self) -> Iterator[str]: return self._get_data().__iter__() def _get_lazy_lists(self, factory: LazyFactory, extractors: Extractors) -> LazyLists: lazy_lists: LazyLists = {} for key, arguments in extractors.items(): if isinstance(arguments, tuple): lazy_lists[key] = factory.create_list(*arguments) else: lazy_lists[key] = self._get_lazy_lists(factory, arguments) return lazy_lists def _init_data(self) -> None: self._data = {} response = list_segments( self.url, self.access_key, self._dataset_id, commit=self.commit_id, ) for sheet in response["segments"]: sheet_name = sheet["name"] data_details = list_data_details( self.url, self.access_key, self._dataset_id, sheet_name, commit=self.commit_id, ) def factory_getter( offset: int, limit: int, sheet_name: str = sheet_name ) -> Dict[str, Any]: return list_data_details( self.url, self.access_key, self._dataset_id, sheet_name, commit=self.commit_id, offset=offset, limit=limit, ) factory = LazyFactory( data_details["totalCount"], 128, factory_getter, ) catalog = get_catalog( self.url, self.access_key, self._dataset_id, commit=self.commit_id, ) first_data_details = data_details["dataDetails"][0] remote_data = RemoteData.from_response_body( first_data_details, url=URL( first_data_details["url"], updater=lambda: "update is not supported currently" ), ) notes = get_notes( self.url, self.access_key, self._dataset_id, commit=self.commit_id, ) schema = catalog_to_schema( Catalog.loads(catalog["catalog"]), remote_data, Notes.loads(notes) ) lazy_lists = self._get_lazy_lists(factory, get_extractors(schema)) self._data[sheet_name] = DataFrame.from_lazy_lists(lazy_lists) def _get_data(self) -> Dict[str, DataFrame]: if not hasattr(self, "_data"): self._init_data() return self._data
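
# A hypothetical concrete subclass sketch: Sheets leaves access_key, url,
# commit_id and _dataset_id to be supplied by the caller, and the placeholder
# strings below stand in for real credentials and identifiers.
class _ExampleSheets(Sheets):
    def __init__(self, access_key: str, url: str, dataset_id: str, commit_id: str) -> None:
        self.access_key = access_key
        self.url = url
        self._dataset_id = dataset_id
        self.commit_id = commit_id


# sheets = _ExampleSheets("<ACCESS_KEY>", "<API_URL>", "<DATASET_ID>", "<COMMIT_ID>")
# list(sheets)  # lazily builds one DataFrame per sheet via the Graviti API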
the-stack_0_8165
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Wrapper layers: layers that augment the functionality of another layer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.eager import context from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers.recurrent import _standardize_args from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.Wrapper') class Wrapper(Layer): """Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer, it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. Arguments: layer: The layer to be wrapped. """ def __init__(self, layer, **kwargs): assert isinstance(layer, Layer) self.layer = layer super(Wrapper, self).__init__(**kwargs) def build(self, input_shape=None): if not self.layer.built: self.layer.build(input_shape) self.layer.built = True self.built = True @property def activity_regularizer(self): if hasattr(self.layer, 'activity_regularizer'): return self.layer.activity_regularizer else: return None def get_config(self): config = {'layer': generic_utils.serialize_keras_object(self.layer)} base_config = super(Wrapper, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top # Avoid mutating the input dict config = copy.deepcopy(config) layer = deserialize_layer( config.pop('layer'), custom_objects=custom_objects) return cls(layer, **config) @keras_export('keras.layers.TimeDistributed') class TimeDistributed(Wrapper): """This wrapper allows to apply a layer to every temporal slice of an input. The input should be at least 3D, and the dimension of index one will be considered to be the temporal dimension. Consider a batch of 32 video samples, where each sample is a 128x128 RGB image with `channels_last` data format, across 10 timesteps. The batch input shape is `(32, 10, 128, 128, 3)`. 
You can then use `TimeDistributed` to apply a `Conv2D` layer to each of the 10 timesteps, independently: >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3)) >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3)) >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs) >>> outputs.shape TensorShape([None, 10, 126, 126, 64]) Arguments: layer: a `tf.keras.layers.Layer` instance. Call arguments: inputs: Input tensor. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the wrapped layer (only if the layer supports this argument). mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. This argument is passed to the wrapped layer (only if the layer supports this argument). Raises: ValueError: If not initialized with a `tf.keras.layers.Layer` instance. """ def __init__(self, layer, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `TimeDistributed` layer with a ' '`tf.keras.layers.Layer` instance. You passed: {input}'.format( input=layer)) super(TimeDistributed, self).__init__(layer, **kwargs) self.supports_masking = True # It is safe to use the fast, reshape-based approach with all of our # built-in Layers. self._always_use_reshape = ( layer_utils.is_builtin_layer(layer) and not getattr(layer, 'stateful', False)) def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None): """Finds non-specific dimensions in the static shapes. The static shapes are replaced with the corresponding dynamic shapes of the tensor. Arguments: init_tuple: a tuple, the first part of the output shape tensor: the tensor from which to get the (static and dynamic) shapes as the last part of the output shape start_idx: int, which indicate the first dimension to take from the static shape of the tensor int_shape: an alternative static shape to take as the last part of the output shape Returns: The new int_shape with the first part from init_tuple and the last part from either `int_shape` (if provided) or `tensor.shape`, where every `None` is replaced by the corresponding dimension from `tf.shape(tensor)`. """ # replace all None in int_shape by K.shape if int_shape is None: int_shape = K.int_shape(tensor)[start_idx:] if not any(not s for s in int_shape): return init_tuple + tuple(int_shape) shape = K.shape(tensor) int_shape = list(int_shape) for i, s in enumerate(int_shape): if not s: int_shape[i] = shape[start_idx + i] return init_tuple + tuple(int_shape) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if len(input_shape) < 3: raise ValueError( '`TimeDistributed` Layer should be passed an `input_shape ` ' 'with at least 3 dimensions, received: ' + str(input_shape)) # Don't enforce the batch or time dimension. 
self.input_spec = InputSpec(shape=[None, None] + input_shape[2:]) child_input_shape = [input_shape[0]] + input_shape[2:] super(TimeDistributed, self).build(tuple(child_input_shape)) self.built = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() child_input_shape = tensor_shape.TensorShape([input_shape[0]] + input_shape[2:]) child_output_shape = self.layer.compute_output_shape(child_input_shape) if not isinstance(child_output_shape, tensor_shape.TensorShape): child_output_shape = tensor_shape.TensorShape(child_output_shape) child_output_shape = child_output_shape.as_list() timesteps = input_shape[1] return tensor_shape.TensorShape([child_output_shape[0], timesteps] + child_output_shape[1:]) def call(self, inputs, training=None, mask=None): kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training input_shape = K.int_shape(inputs) if input_shape[0] and not self._always_use_reshape: inputs, row_lengths = K.convert_inputs_if_ragged(inputs) is_ragged_input = row_lengths is not None # batch size matters, use rnn-based implementation def step(x, _): output = self.layer(x, **kwargs) return output, [] _, outputs, _ = K.rnn( step, inputs, initial_states=[], input_length=row_lengths[0] if is_ragged_input else input_shape[1], mask=mask, unroll=False) y = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths) else: # No batch size specified, therefore the layer will be able # to process batches of any size. # We can go with reshape-based implementation for performance. if isinstance(inputs, ragged_tensor.RaggedTensor): y = self.layer(inputs.values, **kwargs) y = ragged_tensor.RaggedTensor.from_row_lengths( y, inputs.nested_row_lengths()[0]) else: input_length = input_shape[1] if not input_length: input_length = array_ops.shape(inputs)[1] inner_input_shape = self._get_shape_tuple((-1,), inputs, 2) # Shape: (num_samples * timesteps, ...). And track the # transformation in self._input_map. inputs = array_ops.reshape(inputs, inner_input_shape) # (num_samples * timesteps, ...) if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) kwargs['mask'] = K.reshape(mask, inner_mask_shape) y = self.layer(inputs, **kwargs) # Shape: (num_samples, timesteps, ...) output_shape = self.compute_output_shape(input_shape).as_list() output_shape = self._get_shape_tuple((-1, input_length), y, 1, output_shape[2:]) y = array_ops.reshape(y, output_shape) if not context.executing_eagerly(): # Set the static shape for the result since it might be lost during # array_ops reshape, eg, some `None` dim in the result could be # inferred. y.set_shape(self.compute_output_shape(input_shape)) return y def compute_mask(self, inputs, mask=None): """Computes an output mask tensor for Embedding layer. This is based on the inputs, mask, and the inner layer. If batch size is specified: Simply return the input `mask`. (An rnn-based implementation with more than one rnn inputs is required but not supported in tf.keras yet.) Otherwise we call `compute_mask` of the inner layer at each time step. If the output mask at each time step is not `None`: (E.g., inner layer is Masking or RNN) Concatenate all of them and return the concatenation. If the output mask at each time step is `None` and the input mask is not `None`:(E.g., inner layer is Dense) Reduce the input_mask to 2 dimensions and return it. 
Otherwise (both the output mask and the input mask are `None`): (E.g., `mask` is not used at all) Return `None`. Arguments: inputs: Tensor with shape [batch size, timesteps, ...] indicating the input to TimeDistributed. If static shape information is available for "batch size", `mask` is returned unmodified. mask: Either None (indicating no masking) or a Tensor indicating the input mask for TimeDistributed. The shape can be static or dynamic. Returns: Either None (no masking), or a [batch size, timesteps, ...] Tensor with an output mask for the TimeDistributed layer with the shape beyond the second dimension being the value of the input mask shape(if the computed output mask is none), an output mask with the shape beyond the first dimension being the value of the mask shape(if mask is not None) or output mask with the shape beyond the first dimension being the value of the computed output shape. """ # cases need to call the layer.compute_mask when input_mask is None: # Masking layer and Embedding layer with mask_zero input_shape = K.int_shape(inputs) if input_shape[0] and not self._always_use_reshape or isinstance( inputs, ragged_tensor.RaggedTensor): # batch size matters, we currently do not handle mask explicitly, or if # the layer always uses reshape approach, or the input is a ragged tensor. return mask inner_mask = mask if inner_mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) inner_mask = K.reshape(inner_mask, inner_mask_shape) inner_input_shape = self._get_shape_tuple((-1,), inputs, 2) inner_inputs = array_ops.reshape(inputs, inner_input_shape) output_mask = self.layer.compute_mask(inner_inputs, inner_mask) if output_mask is None: if mask is None: return None # input_mask is not None, and output_mask is None: # we should return a not-None mask output_mask = mask for _ in range(2, len(K.int_shape(mask))): output_mask = K.any(output_mask, axis=-1) else: # output_mask is not None. We need to reshape it input_length = input_shape[1] if not input_length: input_length = K.shape(inputs)[1] output_mask_int_shape = K.int_shape(output_mask) if output_mask_int_shape is None: # if the output_mask does not have a static shape, # its shape must be the same as mask's if mask is not None: output_mask_int_shape = K.int_shape(mask) else: output_mask_int_shape = K.compute_output_shape(input_shape)[:-1] output_mask_shape = self._get_shape_tuple( (-1, input_length), output_mask, 1, output_mask_int_shape[1:]) output_mask = K.reshape(output_mask, output_mask_shape) return output_mask @keras_export('keras.layers.Bidirectional') class Bidirectional(Wrapper): """Bidirectional wrapper for RNNs. Arguments: layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance that meets the following criteria: 1. Be a sequence-processing layer (accepts 3D+ inputs). 2. Have a `go_backwards`, `return_sequences` and `return_state` attribute (with the same semantics as for the `RNN` class). 3. Have an `input_spec` attribute. 4. Implement serialization via `get_config()` and `from_config()`. Note that the recommended way to create new RNN layers is to write a custom RNN cell and use it with `keras.layers.RNN`, instead of subclassing `keras.layers.Layer` directly. merge_mode: Mode by which outputs of the forward and backward RNNs will be combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the outputs will not be combined, they will be returned as a list. Default value is 'concat'. 
backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer` instance to be used to handle backwards input processing. If `backward_layer` is not provided, the layer instance passed as the `layer` argument will be used to generate the backward layer automatically. Note that the provided `backward_layer` layer should have properties matching those of the `layer` argument, in particular it should have the same values for `stateful`, `return_states`, `return_sequence`, etc. In addition, `backward_layer` and `layer` should have different `go_backwards` argument values. A `ValueError` will be raised if these requirements are not met. Call arguments: The call arguments for this layer are the same as those of the wrapped RNN layer. Beware that when passing the `initial_state` argument during the call of this layer, the first half in the list of elements in the `initial_state` list will be passed to the forward RNN call and the last half in the list of elements will be passed to the backward RNN call. Raises: ValueError: 1. If `layer` or `backward_layer` is not a `Layer` instance. 2. In case of invalid `merge_mode` argument. 3. If `backward_layer` has mismatched properties compared to `layer`. Examples: ```python model = Sequential() model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10))) model.add(Bidirectional(LSTM(10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # With custom backward layer model = Sequential() forward_layer = LSTM(10, return_sequences=True) backward_layer = LSTM(10, activation='relu', return_sequences=True, go_backwards=True) model.add(Bidirectional(forward_layer, backward_layer=backward_layer, input_shape=(5, 10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') ``` """ def __init__(self, layer, merge_mode='concat', weights=None, backward_layer=None, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `Bidirectional` layer with a ' '`Layer` instance. You passed: {input}'.format(input=layer)) if backward_layer is not None and not isinstance(backward_layer, Layer): raise ValueError('`backward_layer` need to be a `Layer` instance. ' 'You passed: {input}'.format(input=backward_layer)) if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]: raise ValueError('Invalid merge mode. ' 'Merge mode should be one of ' '{"sum", "mul", "ave", "concat", None}') # We don't want to track `layer` since we're already tracking the two copies # of it we actually run. self._setattr_tracking = False super(Bidirectional, self).__init__(layer, **kwargs) self._setattr_tracking = True # Recreate the forward layer from the original layer config, so that it will # not carry over any state from the layer. self.forward_layer = self._recreate_layer_from_config(layer) if backward_layer is None: self.backward_layer = self._recreate_layer_from_config( layer, go_backwards=True) else: self.backward_layer = backward_layer # Keep the custom backward layer config, so that we can save it later. The # layer's name might be updated below with prefix 'backward_', and we want # to preserve the original config. 
self._backward_layer_config = generic_utils.serialize_keras_object( backward_layer) self.forward_layer._name = 'forward_' + self.forward_layer.name self.backward_layer._name = 'backward_' + self.backward_layer.name self._verify_layer_config() def force_zero_output_for_mask(layer): # Force the zero_output_for_mask to be True if returning sequences. if getattr(layer, 'zero_output_for_mask', None) is not None: layer.zero_output_for_mask = layer.return_sequences force_zero_output_for_mask(self.forward_layer) force_zero_output_for_mask(self.backward_layer) self.merge_mode = merge_mode if weights: nw = len(weights) self.forward_layer.initial_weights = weights[:nw // 2] self.backward_layer.initial_weights = weights[nw // 2:] self.stateful = layer.stateful self.return_sequences = layer.return_sequences self.return_state = layer.return_state self.supports_masking = True self._trainable = True self._num_constants = 0 self.input_spec = layer.input_spec def _verify_layer_config(self): """Ensure the forward and backward layers have valid common property.""" if self.forward_layer.go_backwards == self.backward_layer.go_backwards: raise ValueError('Forward layer and backward layer should have different ' '`go_backwards` value.') common_attributes = ('stateful', 'return_sequences', 'return_state') for a in common_attributes: forward_value = getattr(self.forward_layer, a) backward_value = getattr(self.backward_layer, a) if forward_value != backward_value: raise ValueError( 'Forward layer and backward layer are expected to have the same ' 'value for attribute {attr}, got {forward} and {backward}'.format( attr=a, forward=forward_value, backward=backward_value)) def _recreate_layer_from_config(self, layer, go_backwards=False): # When recreating the layer from its config, it is possible that the layer # is a RNN layer that contains custom cells. In this case we inspect the # layer and pass the custom cell class as part of the `custom_objects` # argument when calling `from_config`. # See https://github.com/tensorflow/tensorflow/issues/26581 for more detail. 
config = layer.get_config() if go_backwards: config['go_backwards'] = not config['go_backwards'] if 'custom_objects' in tf_inspect.getfullargspec( layer.__class__.from_config).args: custom_objects = {} cell = getattr(layer, 'cell', None) if cell is not None: custom_objects[cell.__class__.__name__] = cell.__class__ # For StackedRNNCells stacked_cells = getattr(cell, 'cells', []) for c in stacked_cells: custom_objects[c.__class__.__name__] = c.__class__ return layer.__class__.from_config(config, custom_objects=custom_objects) else: return layer.__class__.from_config(config) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): output_shape = self.forward_layer.compute_output_shape(input_shape) if not isinstance(output_shape, tensor_shape.TensorShape): output_shape = tensor_shape.TensorShape(output_shape) output_shape = tuple(output_shape.as_list()) if self.return_state: state_shape = output_shape[1:] output_shape = output_shape[0] if self.merge_mode == 'concat': output_shape = list(output_shape) output_shape[-1] *= 2 output_shape = tuple(output_shape) elif self.merge_mode is None: output_shape = [output_shape, copy.copy(output_shape)] if self.return_state: if self.merge_mode is None: return output_shape + state_shape + copy.copy(state_shape) return [output_shape] + state_shape + copy.copy(state_shape) return output_shape def __call__(self, inputs, initial_state=None, constants=None, **kwargs): """`Bidirectional.__call__` implements the same API as the wrapped `RNN`.""" inputs, initial_state, constants = _standardize_args( inputs, initial_state, constants, self._num_constants) if isinstance(inputs, list): if len(inputs) > 1: initial_state = inputs[1:] inputs = inputs[0] if initial_state is None and constants is None: return super(Bidirectional, self).__call__(inputs, **kwargs) # Applies the same workaround as in `RNN.__call__` additional_inputs = [] additional_specs = [] if initial_state is not None: # Check if `initial_state` can be splitted into half num_states = len(initial_state) if num_states % 2 > 0: raise ValueError( 'When passing `initial_state` to a Bidirectional RNN, ' 'the state should be a list containing the states of ' 'the underlying RNNs. 
' 'Found: ' + str(initial_state)) kwargs['initial_state'] = initial_state additional_inputs += initial_state state_specs = [InputSpec(shape=K.int_shape(state)) for state in initial_state] self.forward_layer.state_spec = state_specs[:num_states // 2] self.backward_layer.state_spec = state_specs[num_states // 2:] additional_specs += state_specs if constants is not None: kwargs['constants'] = constants additional_inputs += constants constants_spec = [InputSpec(shape=K.int_shape(constant)) for constant in constants] self.forward_layer.constants_spec = constants_spec self.backward_layer.constants_spec = constants_spec additional_specs += constants_spec self._num_constants = len(constants) self.forward_layer._num_constants = self._num_constants self.backward_layer._num_constants = self._num_constants is_keras_tensor = K.is_keras_tensor(additional_inputs[0]) for tensor in additional_inputs: if K.is_keras_tensor(tensor) != is_keras_tensor: raise ValueError('The initial state of a Bidirectional' ' layer cannot be specified with a mix of' ' Keras tensors and non-Keras tensors' ' (a "Keras tensor" is a tensor that was' ' returned by a Keras layer, or by `Input`)') if is_keras_tensor: # Compute the full input spec, including state full_input = [inputs] + additional_inputs # The original input_spec is None since there could be a nested tensor # input. Update the input_spec to match the inputs. full_input_spec = [None for _ in range(len(nest.flatten(inputs))) ] + additional_specs # Removing kwargs since the value are passed with input list. kwargs['initial_state'] = None kwargs['constants'] = None # Perform the call with temporarily replaced input_spec original_input_spec = self.input_spec self.input_spec = full_input_spec output = super(Bidirectional, self).__call__(full_input, **kwargs) self.input_spec = original_input_spec return output else: return super(Bidirectional, self).__call__(inputs, **kwargs) def call(self, inputs, training=None, mask=None, initial_state=None, constants=None): """`Bidirectional.call` implements the same API as the wrapped `RNN`.""" kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training if generic_utils.has_arg(self.layer.call, 'mask'): kwargs['mask'] = mask if generic_utils.has_arg(self.layer.call, 'constants'): kwargs['constants'] = constants if generic_utils.has_arg(self.layer.call, 'initial_state'): if isinstance(inputs, list) and len(inputs) > 1: # initial_states are keras tensors, which means they are passed in # together with inputs as list. The initial_states need to be split into # forward and backward section, and be feed to layers accordingly. forward_inputs = [inputs[0]] backward_inputs = [inputs[0]] pivot = (len(inputs) - self._num_constants) // 2 + 1 # add forward initial state forward_inputs += inputs[1:pivot] if not self._num_constants: # add backward initial state backward_inputs += inputs[pivot:] else: # add backward initial state backward_inputs += inputs[pivot:-self._num_constants] # add constants for forward and backward layers forward_inputs += inputs[-self._num_constants:] backward_inputs += inputs[-self._num_constants:] forward_state, backward_state = None, None if 'constants' in kwargs: kwargs['constants'] = None elif initial_state is not None: # initial_states are not keras tensors, eg eager tensor from np array. # They are only passed in from kwarg initial_state, and should be passed # to forward/backward layer via kwarg initial_state as well. 
forward_inputs, backward_inputs = inputs, inputs half = len(initial_state) // 2 forward_state = initial_state[:half] backward_state = initial_state[half:] else: forward_inputs, backward_inputs = inputs, inputs forward_state, backward_state = None, None y = self.forward_layer(forward_inputs, initial_state=forward_state, **kwargs) y_rev = self.backward_layer(backward_inputs, initial_state=backward_state, **kwargs) else: y = self.forward_layer(inputs, **kwargs) y_rev = self.backward_layer(inputs, **kwargs) if self.return_state: states = y[1:] + y_rev[1:] y = y[0] y_rev = y_rev[0] if self.return_sequences: time_dim = 0 if getattr(self.forward_layer, 'time_major', False) else 1 y_rev = K.reverse(y_rev, time_dim) if self.merge_mode == 'concat': output = K.concatenate([y, y_rev]) elif self.merge_mode == 'sum': output = y + y_rev elif self.merge_mode == 'ave': output = (y + y_rev) / 2 elif self.merge_mode == 'mul': output = y * y_rev elif self.merge_mode is None: output = [y, y_rev] else: raise ValueError( 'Unrecognized value for `merge_mode`: %s' % (self.merge_mode)) if self.return_state: if self.merge_mode is None: return output + states return [output] + states return output def reset_states(self): self.forward_layer.reset_states() self.backward_layer.reset_states() def build(self, input_shape): with K.name_scope(self.forward_layer.name): self.forward_layer.build(input_shape) with K.name_scope(self.backward_layer.name): self.backward_layer.build(input_shape) self.built = True def compute_mask(self, inputs, mask): if isinstance(mask, list): mask = mask[0] if self.return_sequences: if not self.merge_mode: output_mask = [mask, mask] else: output_mask = mask else: output_mask = [None, None] if not self.merge_mode else None if self.return_state: states = self.forward_layer.states state_mask = [None for _ in states] if isinstance(output_mask, list): return output_mask + state_mask * 2 return [output_mask] + state_mask * 2 return output_mask @property def constraints(self): constraints = {} if hasattr(self.forward_layer, 'constraints'): constraints.update(self.forward_layer.constraints) constraints.update(self.backward_layer.constraints) return constraints def get_config(self): config = {'merge_mode': self.merge_mode} if self._num_constants: config['num_constants'] = self._num_constants if hasattr(self, '_backward_layer_config'): config['backward_layer'] = self._backward_layer_config base_config = super(Bidirectional, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): # Instead of updating the input, create a copy and use that. config = copy.deepcopy(config) num_constants = config.pop('num_constants', 0) # Handle forward layer instantiation (as would parent class). from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top config['layer'] = deserialize_layer( config['layer'], custom_objects=custom_objects) # Handle (optional) backward layer instantiation. backward_layer_config = config.pop('backward_layer', None) if backward_layer_config is not None: backward_layer = deserialize_layer( backward_layer_config, custom_objects=custom_objects) config['backward_layer'] = backward_layer # Instantiate the wrapper, adjust it and return it. layer = cls(**config) layer._num_constants = num_constants return layer
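# --- Editor's usage sketch (not part of the original TensorFlow source) ---
# A minimal illustration of the two wrappers defined above, assuming a
# standard TensorFlow installation. The shapes follow from the docstrings:
# `TimeDistributed` applies the inner layer to every timestep independently,
# and `Bidirectional` with the default merge_mode='concat' doubles the last
# feature dimension (merge_mode=None would instead return [forward, backward]).
import tensorflow as tf

inputs = tf.keras.Input(shape=(10, 16))        # (batch, timesteps, features)

# Same Dense layer applied to each of the 10 timesteps.
td_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(8))(inputs)
print(td_out.shape)                            # (None, 10, 8)

# Forward and backward LSTM outputs concatenated on the feature axis: 32 -> 64.
bi_out = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(32, return_sequences=True))(inputs)
print(bi_out.shape)                            # (None, 10, 64)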
the-stack_0_8166
"""Ajout vigilance meteo Revision ID: 901a31d192ad Revises: dcffac33e4fd Create Date: 2021-11-26 16:35:51.243300 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '901a31d192ad' down_revision = 'dcffac33e4fd' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('vigilance_meteo', sa.Column('id', sa.Integer(), nullable=False), sa.Column('zone_id', sa.Integer(), nullable=True), sa.Column('phenomene_id', sa.Integer(), nullable=True), sa.Column('date_export', sa.DateTime(), nullable=True), sa.Column('couleur_id', sa.Integer(), nullable=True), sa.Column('validity', postgresql.TSTZRANGE(), nullable=False), sa.Column('to_show', postgresql.DATERANGE(), nullable=False), sa.ForeignKeyConstraint(['zone_id'], ['indice_schema.zone.id'], ), sa.PrimaryKeyConstraint('id'), schema='indice_schema' ) op.create_index('vigilance_zone_phenomene_date_export_idx', 'vigilance_meteo', ['zone_id', 'phenomene_id', 'date_export'], unique=False, schema='indice_schema') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index('vigilance_zone_phenomene_date_export_idx', table_name='vigilance_meteo', schema='indice_schema') op.drop_table('vigilance_meteo', schema='indice_schema') # ### end Alembic commands ###
the-stack_0_8167
""" [PYTHON NAMING CONVENTION] module_name, package_name, ClassName, method_name, ExceptionName, function_name, GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name. """ import sys, os import cv2 import re import pprint import numpy as np import time, datetime import pickle from modules.utils import ( my_print, quaternion2euler, camel2snake, snake2camel, MyVideo, str2float) from modules.constants import Constants try: import mujoco_py as mjPy except ImportError as e: raise error.DependencyNotInstalled( "{}. (HINT: you need to install mujoco_py, \ and also perform the setup instructions here: \ https://github.com/openai/mujoco-py/.)".format( e ) ) # from mujoco_py import class Simulation( ): """ Running a single Whip Simulation [INHERITANCE] [DESCRIPTION] [NOTE] All of the model files are saved in "models" directory, and we are using "relative directory" to generate and find the .xml model file. Hence do not change of "model directory" variable within this """ MODEL_DIR = Constants.MODEL_DIR SAVE_DIR = Constants.SAVE_DIR VISUALIZE = True current_time = 0 controller = None # Control input function def __init__( self, model_name = None, is_visualize = True, arg_parse = None ): """ Default constructor of THIS class [ARGUMENTS] [NAME] [TYPE] [DESCRIPTION] (1) model_name string The xml model file name for running the MuJoCo simulation. (2) is_visualized boolean Turn ON/OFF the mjViewer (visualizer) of the simulation. This flag is useful when optimizing a simulation. (3) arg_parse dictionary Dictionary which contains all the arguments given to the main `run.py` script. """ if model_name is None: self.mjModel = None self.mjSim = None self.mjData = None self.mjViewer = None self.args = arg_parse my_print( WARNING = "MODEL FILE NOT GIVEN, PLEASE INPUT XML MODEL FILE WITH `attach_model` MEMBER FUNCTION" ) else: # If model_name is given, then check if there exist ".xml" at the end, if not, append model_name = model_name + ".xml" if model_name[ -4: ] != ".xml" else model_name self.model_name = model_name # Based on the model_name, construct the simulation. self.mjModel = mjPy.load_model_from_path( self.MODEL_DIR + model_name ) # Loading xml model as and save it as "model" self.mjSim = mjPy.MjSim( self.mjModel ) # Construct the simulation environment and save it as "sim" self.mjData = self.mjSim.data # Construct the basic MuJoCo data and save it as "mjData" self.mjViewer = mjPy.MjViewerBasic( self.mjSim ) if is_visualize else None # Construct the basic MuJoCo viewer and save it as "myViewer" self.args = arg_parse # Saving the default simulation variables self.fps = 60 # Frames per second for the mujoco render self.dt = self.mjModel.opt.timestep # Time step of the simulation [sec] self.sim_step = 0 # Number of steps of the simulation, in integer [-] self.update_rate = round( 1 / self.dt / self.fps ) # 1/dt = number of steps N for 1 second simulaiton, dividing this with frames-per-second (fps) gives us the frame step to be updated. 
self.g = self.mjModel.opt.gravity # Calling the gravity vector of the simulation environment # Saving additional model parameters for multiple purposes self.act_names = self.mjModel.actuator_names self.geom_names = self.mjModel.geom_names self.idx_geom_names = [ self.mjModel._geom_name2id[ name ] for name in self.geom_names ] self.n_acts = len( self.mjModel.actuator_names ) self.n_limbs = '-'.join( self.mjModel.body_names ).lower().count( 'arm' ) self.run_time = float( self.args[ 'runTime' ] ) # Run time of the total simulation self.start_time = float( self.args[ 'startTime' ] ) # Start time of the movements self.VISUALIZE = is_visualize # saving the VISUALIZE Flag def attach_model( self, model_name ): if self.mjModel is not None: my_print( WARNING = "MODEL FILE EXIST! OVERWRITTING THE WHOLE MUJOCO FILE" ) self.__init__( model_name ) def attach_controller( self, controller_name ): """ Attaching the controller object for running the simulation. For detailed controller description, please check "controllers.py" """ self.controller = controller_name def set_initial_condition( self ): """ Manually setting the initial condition of the system. """ if "_w_" in self.model_name: # If whip is attached to the model. tmp = self.mjData.get_body_xquat( "node1" ) # Getting the quaternion angle of the whip handle yaw, pitch, roll = quaternion2euler( tmp ) self.mjData.qpos[ self.n_acts ] = - roll # Setting the handle posture to make the whip being straight down at equilibrium. self.mjData.qpos[ self.n_acts + 1 ] = + pitch # Setting the handle posture to make the whip being straight down at equilibrium. self.mjSim.forward() # Running the forward kinematics, or setting the model as the given qpos WITHOUT proceeding the time step. Therefore no simulation time step is executed. def run( self ): """ Running a single simulation. [INPUT] [VAR NAME] [TYPE] [DESCRIPTION] (1) run_time float The whole run time of the simulation. (2) ctrl_start_time float """ # Check if mjModel or mjSim is empty and raise error if self.mjModel is None or self.mjSim is None: raise ValueError( "mjModel and mjSim is Empty! Add it before running simulation" ) # Warn the user if input and output function is empty if self.controller is None: raise ValueError( "CONTROLLER NOT ATTACHED TO SIMULATION. \ PLEASE REFER TO METHOD 'attach_output_function' and 'attach_controller' " ) if self.args[ 'recordVideo' ]: vid = MyVideo( fps = self.fps * float( self.args[ 'vidRate' ] ), vid_dir = self.args[ 'saveDir' ] ) # If args doesn't have saveDir attribute, save vid_dir as None if self.args[ 'saveData' ]: file = open( self.args[ 'saveDir' ] + "data_log.txt", "w+" ) # Setting the camera position for the simulation # [camParameters]: [ 0.17051, 0.21554, -0.82914, 2.78528,-30.68421,162.42105 ] # [camParameters]: [ -0.10325, 0. , -2.51498, 7.278 ,-45. , 90. ] if self.args[ 'camPos' ] is not None: tmp = str2float( self.args[ 'camPos' ] ) self.mjViewer.cam.lookat[ 0:3 ] = tmp[ 0 : 3 ] self.mjViewer.cam.distance = tmp[ 3 ] self.mjViewer.cam.elevation = tmp[ 4 ] self.mjViewer.cam.azimuth = tmp[ 5 ] self.set_initial_condition( ) # Setting initial condition. 
Some specific controllers need to specify the initial condition while self.current_time <= self.run_time: if self.sim_step % self.update_rate == 0: if self.mjViewer is not None: self.mjViewer.render( ) # Render the simulation my_print( currentTime = self.current_time, a = self.controller.a ) if self.args[ 'verbose' ]: my_print( camParameters = [ self.mjViewer.cam.lookat[ 0 ], self.mjViewer.cam.lookat[ 1 ], self.mjViewer.cam.lookat[ 2 ], self.mjViewer.cam.distance, self.mjViewer.cam.elevation, self.mjViewer.cam.azimuth ] ) if self.args[ 'recordVideo' ]: vid.write( self.mjViewer ) if self.args[ 'saveData' ]: my_print( currentTime = self.current_time, jointAngleActual = self.mjData.qpos[ : ], geomXYZPositions = self.mjData.geom_xpos[ self.idx_geom_names ], desiredTrajectory = self.controller.traj_pos[ : ], trajectoryError = self.controller.traj_pos[ : ] - self.mjData.get_geom_xpos( "EEGEOM" ) if self.controller.type == 2 else self.controller.traj_pos[ : ] - self.mjData.qpos[ : ], file = file ) # [input controller] # input_ref: The data array that are aimed to be inputted (e.g., qpos, qvel, qctrl etc.) # input_idx: The specific index of input_ref data array that should be inputted # input: The actual input value which is inputted to input_ref input_ref, input_idx, input = self.controller.input_calc( self.start_time, self.current_time ) input_ref[ input_idx ] = input self.mjSim.step( ) # Single step update if( self.is_sim_unstable() ): # Check if simulation is stable # If not optimization, and result unstable, then save the detailed data print( "[WARNING] UNSTABLE SIMULATION, HALTED AT {0:f} for at {1:f}".format( self.current_time, self.run_time ) ) if self.args[ 'saveData' ]: print( "[WARNING] UNSTABLE SIMULATION, HALTED AT {0:f} for at {1:f}".format( self.current_time, self.run_time ), file = file ) file.close( ) break self.current_time = self.mjData.time # Update the current_time variable of the simulation if self.sim_step % self.update_rate == 0: my_print( trajectoryError = self.controller.traj_pos[ : ] - self.mjData.get_geom_xpos( "EEGEOM" ) if self.controller.type == 2 else self.controller.traj_pos[ : ] - self.mjData.qpos[ : ],) if self.args[ 'saveData' ]: # Saving all the necessary datas for the simulation my_print( inputVal = input, file = file ) self.sim_step += 1 if self.args[ 'recordVideo' ]: vid.release( ) # If simulation is finished, wrap-up the video file. if self.args[ 'saveData' ]: file.close() def save_simulation_data( self, dir ): """ Save all the details of the controller parameters, inputs and output of the simulation """ if dir is not None and dir[ -1 ] != "/": # Quick Check of whether result_dir has backslash "/" at the end dir += "/" # Append the backslash # [TIP] [MOSES] # By using the "with" function you don't need to call f.close( ), the file will automatically close the opened file. # [REF] https://lerner.co.il/2015/01/18/dont-use-python-close-files-answer-depends/ with open( dir + "simulation_details.txt", "w+" ) as f: pprint.pprint( self.controller.__dict__, f ) # Using pretty-print (pprint) to flush out the data in a much readable format print( self.args , file = f ) # Flushing out all the arguments detail. def is_sim_unstable( self ): thres = 1 * 10 ** 6 if ( max( np.absolute( self.mjData.qpos ) ) > thres ) or \ ( max( np.absolute( self.mjData.qvel ) ) > thres ) or \ ( max( np.absolute( self.mjData.qacc ) ) > thres ): return True else: return False def reset( self ): """ Reseting the mujoco simulation """ self.current_time = 0 self.sim_step = 0 self.mjSim.reset( )
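# --- Editor's usage sketch (not part of the original module) ---
# A minimal sketch of how this class appears intended to be driven, based only
# on the attributes read inside __init__() and run(): the arg_parse dictionary
# must supply the keys accessed there, and the attached controller must come
# from controllers.py and expose input_calc() plus the `a`, `traj_pos` and
# `type` attributes. The model file name and `MyController` below are
# hypothetical placeholders.
if __name__ == "__main__":

    args = { 'runTime'     : 4.0,     # total simulation time [sec]
             'startTime'   : 0.5,     # time at which the movement starts [sec]
             'recordVideo' : False,   # MyVideo capture of the mjViewer
             'saveData'    : False,   # flush qpos/geom positions to data_log.txt
             'saveDir'     : None,
             'camPos'      : None,    # e.g. "0.17, 0.21, -0.83, 2.78, -30.7, 162.4"
             'vidRate'     : 1.0,
             'verbose'     : False }

    my_sim = Simulation( model_name = "2D_model_w_whip", is_visualize = True, arg_parse = args )

    # my_ctrl = MyController( my_sim.mjModel, my_sim.mjData )   # hypothetical, see controllers.py
    # my_sim.attach_controller( my_ctrl )
    # my_sim.run( )
    # my_sim.save_simulation_data( args[ 'saveDir' ] )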
the-stack_0_8169
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from .. import models class SkusOperations(object): """SkusOperations operations. You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client API version. Constant value: "2019-11-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2019-11-01" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): """Get the list of StorageCache.Cache SKUs available to this subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of ResourceSku :rtype: ~azure.mgmt.storagecache.models.ResourceSkuPaged[~azure.mgmt.storagecache.models.ResourceSku] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ResourceSkuPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'}
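# --- Editor's usage sketch (not part of the generated client code) ---
# list() returns a ResourceSkuPaged iterator, so callers normally just loop
# over it. This operations class is usually reached through the package's
# management client (attached as `client.skus`) rather than instantiated
# directly; the attribute printed below follows the ResourceSku model and is
# otherwise illustrative.
def print_available_skus(client):
    """Iterate every StorageCache SKU visible to the subscription."""
    for sku in client.skus.list():
        print(sku.name)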
the-stack_0_8172
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy from .._version import VERSION if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class ResourceManagementClientConfiguration(Configuration): """Configuration for ResourceManagementClient. Note that all parameters used to create this instance are saved as instance attributes. :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. :type subscription_id: str """ def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any ) -> None: super(ResourceManagementClientConfiguration, self).__init__(**kwargs) if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") self.credential = credential self.subscription_id = subscription_id self.api_version = "2019-07-01" self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION)) self._configure(**kwargs) def _configure( self, **kwargs: Any ) -> None: self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) self.authentication_policy = kwargs.get('authentication_policy') if self.credential and not self.authentication_policy: self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
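# --- Editor's usage sketch (not part of the generated configuration module) ---
# This configuration object is normally built for you by the matching async
# ResourceManagementClient, so application code only supplies a credential and
# a subscription id. The import path of the async client is an assumption
# based on the usual azure-mgmt-resource package layout.
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.resource.resources.aio import ResourceManagementClient

async def list_resource_groups(subscription_id: str) -> None:
    async with DefaultAzureCredential() as credential:
        async with ResourceManagementClient(credential, subscription_id) as client:
            async for group in client.resource_groups.list():
                print(group.name)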
the-stack_0_8173
import math import mpmath import numpy as np from PIL import Image import os class EvenDimensionError(Exception): # The required resolution for the image is a square with a center pixel that # has the same number of pixels to the left, to the right, above, and # underneath, which precludes any even number of pixels. pass class NumberError(Exception): # For a test of prime numbers, to make sure the number is a non-negative # integer. pass class UlamSpiral: ''' Makes Ulam spirals of any arbitrary sequence. A True/False function is required for each sequence. ''' SIDE = None # The length of the square. image_array = [] # Pixel color data. # If the PNG directory is not made, make it. IMAGE_DIR = os.path.dirname(os.path.realpath(__file__)) IMAGE_DIR = os.path.join(IMAGE_DIR, 'PNG') if not os.path.exists(IMAGE_DIR): os.makedirs(IMAGE_DIR) COLORS = None # Color palettes. is_prime_list = [] # A list for primes so recalculating is not necessary. is_not_prime_list = [] # To avoid recalculation. image_size = 1600 # For scaling the final image. image_size = (image_size, image_size) # The final image resolution. DIGITS_OF_PI = None # To store digits of pi. def __init__(self, sides=[41], modes=['prime'], colors=[[255, 255, 255]], debug_tests=False): ''' Make a basic introduction of the image essentails and specific initializations for sequences that require it. ''' if 'a037003' in modes or debug_tests: self.bake_pi(max(sides)**2+1) if 'a050704' in modes: for x in range(max(sides)**2+1): is_prime = self.is_prime(x) if x%1000 == 0: print('primes ' + str(x) + ' of ' + str(max(sides)**2)) if debug_tests: self.debug_tests() for i, mode in enumerate(modes): self.COLORS = colors[i%len(colors)] for side in sides: if side%2 == 0: raise EvenDimensionError self.SIDE = side self.CENTER = int((self.SIDE-1)/2+1) self.CENTER = [self.CENTER, self.CENTER] self.IMAGE_F = str(mode) + ' {:,}'.format(self.SIDE**2) + \ '.png' self.IMAGE_PATH = os.path.join(self.IMAGE_DIR, self.IMAGE_F) self.image_array = np.zeros((self.SIDE, self.SIDE, 3), \ dtype=np.uint8) self.calc_pixels(mode) self.write_image(mode) def calc_pixels(self, mode): ''' Follow the path requirements of an Ulam spiral. With each step, test the pixel for the sequence. ''' cursor = self.CENTER.copy() # The center pixel. # For the color palettes, keep count the concentric squares. That number # decides, for a pixel who passes the sequence test, which color from # the palettes to choose. num_square = 0 for x in range(1, self.SIDE**2+1): if x%100 == 0: print(str(mode) + ' {:,}'.format(x) + ' of ' + \ '{:,}'.format(self.SIDE**2)) if x > 1: if cursor == [self.CENTER[0]+num_square, self.CENTER[1]+num_square]: cursor[1] += 1 num_square += 1 elif cursor == [self.CENTER[0]+num_square, self.CENTER[1]-num_square]: cursor[1] += 1 elif cursor == [self.CENTER[0]-num_square, self.CENTER[1]-num_square]: cursor[0] += 1 elif cursor == [self.CENTER[0]-num_square, self.CENTER[1]+num_square]: cursor[1] -= 1 elif cursor[1] == self.CENTER[0]+num_square: cursor[0] -= 1 elif cursor[0] == self.CENTER[1]-num_square: cursor[1] -= 1 elif cursor[1] == self.CENTER[0]-num_square: cursor[0] += 1 elif cursor[0] == self.CENTER[1]+num_square: cursor[1] += 1 self.test_pixel(cursor, num_square, x, mode) def test_pixel(self, cursor, num_square, x, mode): ''' The hub for tests of a pixel's presence in a sequence. t_f is True or False depending on if it is or is not a part of the sequence. If true, change the pixel color. 
''' t_f = None if mode == 'prime': t_f = self.is_prime(x) elif mode == 'triangular': t_f = self.is_triangular(x) elif mode == 'square': t_f = self.is_square(x) elif mode == 'pentagonal': t_f = self.is_pentagonal(x) elif mode == 'hexagonal': t_f = self.is_hexagonal(x) elif mode == 'heptagonal': t_f = self.is_heptagonal(x) elif mode == 'octogonal': t_f = self.is_octogonal(x) elif mode == 'nonagonal': t_f = self.is_nonagonal(x) elif mode == 'decagonal': t_f = self.is_decagonal(x) elif mode == 'hendecagonal': t_f = self.is_hendecagonal(x) elif mode == 'dodecagonal': t_f = self.is_dodecagonal(x) elif mode == 'fibonacci': t_f = self.is_fibonacci(x) elif mode == 'factorial': t_f = self.is_factorial(x) elif mode == 'mersenne_prime': t_f = self.is_mersenne_prime(x) elif mode == 'a030513': t_f = self.is_a030513(x) elif mode == 'a050704': t_f = self.is_a050704(x) elif mode == 'a037003': t_f = self.is_a037003(x) if t_f: color = self.COLORS[num_square % len(self.COLORS)] self.image_array[cursor[0]-1, cursor[1]-1][0] = color[0] self.image_array[cursor[0]-1, cursor[1]-1][1] = color[1] self.image_array[cursor[0]-1, cursor[1]-1][2] = color[2] def write_image(self, mode): ''' Write the finalized pixel color values to a PNG. ''' image = Image.fromarray(self.image_array) image = image.resize(self.image_size, Image.NEAREST) image.save(self.IMAGE_PATH) def is_prime(self, x): ''' Return True if x is prime and False otherwise. ''' if x in self.is_prime_list: return True elif x in self.is_not_prime_list: return False if not isinstance(x, int) or x < 0: raise NumberError if x==0 or x == 1: self.is_not_prime_list.append(x) return False if x == 2: self.is_prime_list.append(x) return True for y in range(2, math.floor(math.sqrt(x))+1): if x%y == 0: self.is_not_prime_list.append(x) return False self.is_prime_list.append(x) return True def is_triangular(self, x): ''' Return True if x is triangular and False otherwise. ''' for y in range(1, x+1): t = (y*(y+1)) / 2 if t == x: return True elif t > x: return False def is_square(self, x): ''' Return True if x is square and False otherwise. ''' for y in range(1, x+1): s = y**2 if s == x: return True elif s > x: return False def is_pentagonal(self, x): ''' Return True if x is pentagonal and False otherwise. ''' for y in range(1, x+1): p = (y*(3*y-1)) / 2 if p == x: return True elif p > x: return False def is_hexagonal(self, x): ''' Return True if x is hexagonal and False otherwise. ''' for y in range(1, x+1): h = y*(2*y-1) if h == x: return True elif h > x: return False def is_heptagonal(self, x): ''' Return True if x is heptagonal and False otherwise. ''' for y in range(1, x+1): h = (y*(5*y-3)) / 2 if h == x: return True elif h > x: return False def is_octogonal(self, x): ''' Return True if x is octogonal and False otherwise. ''' for y in range(1, x+1): o = y*(3*y-2) if o == x: return True elif o > x: return False def is_nonagonal(self, x): ''' Return True if x is nonagonal and False otherwise. ''' for y in range(1, x+1): n = (y*(7*y-5)) / 2 if n == x: return True elif n > x: return False def is_decagonal(self, x): ''' Return True if x is decagonal and False otherwise. ''' for y in range(1, x+1): d = 4*y**2 - 3*y if d == x: return True elif d > x: return False def is_hendecagonal(self, x): ''' Return True if x is hendecagonal and False otherwise. ''' for y in range(1, x+1): h = (9*y**2 - 7*y) / 2 if h == x: return True elif h > x: return False def is_dodecagonal(self, x): ''' Return True if x is dodecagonal and False otherwise. 
''' for y in range(1, x+1): d = 5*y**2 - 4*y if d == x: return True elif d > x: return False def is_fibonacci(self, x): ''' Return True for numbers in the Fibonacci sequence and False otherwise. ''' f1 = 0 f2 = 1 while True: f = f1 + f2 if f == x: return True elif f > x: return False else: f1 = f2 f2 = f def is_factorial(self, x): ''' Return True for factorials and False otherwise. ''' for y in range(1, x+1): f = 1 for z in reversed(range(1, y+1)): f = f*z if f == x: return True elif f > x: return False def is_mersenne_prime(self, x): ''' Return True for Mersenne primes and False otherwise. ''' for y in range(1, x+1): m = 2**y-1 if m == x and self.is_prime(x): return True elif m > x: return False def is_a030513(self, x): ''' Return True for numbers in A030513 and False otherwise. https://oeis.org/A030513 Numbers with 4 divisors. ''' divisors = [] for y in range(1, x+1): if x%y == 0: divisors.append(y) if len(divisors) > 4: return False if len(divisors) == 4: return True else: return False def is_a050704(self, x): ''' Return True for numbers in A050704 and False otherwise. https://oeis.org/A050704 Composite numbers k with the property that k minus the sum of the prime factors of k is prime. ''' if x == 1: return False primes = [] prime_factors = [] if self.is_prime(x): primes.append(x) else: primes = [p for p in self.is_prime_list if p <= x/2] d = x for prime in primes: while True: if d%prime == 0: prime_factors.append(prime) d //= prime else: break sum_of_prime_factors = 0 for prime_factor in prime_factors: sum_of_prime_factors += prime_factor k_minus_sum_of_prime_factors = x - sum_of_prime_factors if self.is_prime(k_minus_sum_of_prime_factors): return True else: return False def is_a037003(self, x): ''' Return True for numbers in A037003 and False otherwise. https://oeis.org/A037003 Positions of the digit '4' in the decimal expansion of Pi. ''' if self.DIGITS_OF_PI[x-1] == '4': return True else: return False def bake_pi(self, num_digits): ''' Set the value of DIGITS_OF_PI to the decimal expansion of pi for any arbitrary length. ''' mpmath.mp.dps = num_digits pi = mpmath.mp.pi self.DIGITS_OF_PI = str(pi)[2:] def debug_tests(self): ''' Outputs a list of numbers in the sequences for verification. ''' list_={'prime': [], 'triangular': [], 'square': [], 'pentagonal': [], 'hexagonal': [], 'heptagonal': [], 'hexagonal': [], 'octogonal': [], 'nonagonal': [], 'decagonal': [], 'hendecagonal': [], 'dodecagonal': [], 'fibonacci': [], 'factorial': [], 'mersenne_prime': [], 'a030513': [], 'a050704': [], 'a037003': []} for x in range(1, 100): if self.is_prime(x): list_['prime'].append(x) if self.is_triangular(x): list_['triangular'].append(x) if self.is_square(x): list_['square'].append(x) if self.is_pentagonal(x): list_['pentagonal'].append(x) if self.is_hexagonal(x): list_['hexagonal'].append(x) if self.is_heptagonal(x): list_['heptagonal'].append(x) if self.is_octogonal(x): list_['octogonal'].append(x) if self.is_nonagonal(x): list_['nonagonal'].append(x) if self.is_decagonal(x): list_['decagonal'].append(x) if self.is_hendecagonal(x): list_['hendecagonal'].append(x) if self.is_dodecagonal(x): list_['dodecagonal'].append(x) if self.is_fibonacci(x): list_['fibonacci'].append(x) if self.is_factorial(x): list_['factorial'].append(x) if self.is_mersenne_prime(x): list_['mersenne_prime'].append(x) if self.is_a030513(x): list_['a030513'].append(x) if self.is_a050704(x): list_['a050704'].append(x) if self.is_a037003(x): list_['a037003'].append(x) input(list_) if __name__ == '__main__': pass
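# --- Editor's usage sketch (not part of the original script) ---
# Everything runs from the constructor, so a typical invocation is a single
# call. `sides` must contain odd numbers (EvenDimensionError otherwise),
# `modes` must be names handled in test_pixel(), and, following the indexing
# in __init__ and test_pixel(), `colors` is one palette per mode where each
# palette is a list of [R, G, B] triples cycled over the concentric squares.
# The concrete values below are illustrative.
#
#   UlamSpiral( sides=[201, 401],
#               modes=['prime', 'fibonacci'],
#               colors=[ [[255, 255, 255]],                      # palette for 'prime'
#                        [[255, 200, 0], [0, 200, 255]] ] )      # palette for 'fibonacci'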
the-stack_0_8175
from bddrest import response, when, status from nanohttp import json from sqlalchemy import Unicode, Integer from restfulpy.controllers import JSONPatchControllerMixin, ModelRestController from restfulpy.orm import commit, DeclarativeBase, Field, DBSession, \ FilteringMixin, PaginationMixin, OrderingMixin, ModifiedMixin from restfulpy.testing import ApplicableTestCase from restfulpy.exceptions import SQLError class SQLErrorCheckingModel( ModifiedMixin, FilteringMixin, PaginationMixin, OrderingMixin, DeclarativeBase ): __tablename__ = 'sql_error_checking_model' id = Field(Integer, primary_key=True) title = Field(Unicode(50), unique=True, nullable=False) class Root(ModelRestController): __model__ = SQLErrorCheckingModel @json @commit def post(self): m = SQLErrorCheckingModel() m.update_from_request() DBSession.add(m) return m @json @SQLErrorCheckingModel.expose def get(self, title: str=None): query = SQLErrorCheckingModel.query if title: return query.filter(SQLErrorCheckingModel.title == title)\ .one_or_none() return query class TestSqlExceptions(ApplicableTestCase): __controller_factory__ = Root def test_sql_errors(self): with self.given( 'Testing SQL exceptions', '/', 'POST', form=dict(title='test') ): assert response.json['title'] == 'test' when('Posting gain to raise a unique_violation sql error') assert status == 409 def test_invalid_sql_error(self): assert '500 Internal server error' == SQLError.map_exception(ValueError())
the-stack_0_8176
import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import DropPath, to_2tuple, trunc_normal_ class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class WindowAttention(nn.Module): r""" Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 """ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer("relative_position_index", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self, x, mask=None): """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose(-2, -1)) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x def extra_repr(self) -> str: return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' def flops(self, N): # calculate flops for 1 window with token length of N flops = 0 # qkv = self.qkv(x) flops += N * self.dim * 3 * self.dim # attn = (q @ k.transpose(-2, -1)) flops += self.num_heads * N * (self.dim // self.num_heads) * N # x = (attn @ v) flops += self.num_heads * N * N * (self.dim // self.num_heads) # x = self.proj(x) flops += N * self.dim * self.dim return flops class SwinTransformerBlock(nn.Module): r""" Swin Transformer Block. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resulotion. num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.input_resolution = input_resolution self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio if min(self.input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = 0 self.window_size = min(self.input_resolution) assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" self.norm1 = norm_layer(dim) self.attn = WindowAttention( dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if self.shift_size > 0: # calculate attention mask for SW-MSA H, W = self.input_resolution img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask) def forward(self, x): H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" shortcut = x x = self.norm1(x) x = x.view(B, H, W, C) # cyclic shift if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x # partition windows x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C # reverse cyclic shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x x = x.view(B, H * W, C) # FFN x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) 
return x def extra_repr(self) -> str: return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" def flops(self): flops = 0 H, W = self.input_resolution # norm1 flops += self.dim * H * W # W-MSA/SW-MSA nW = H * W / self.window_size / self.window_size flops += nW * self.attn.flops(self.window_size * self.window_size) # mlp flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio # norm2 flops += self.dim * H * W return flops class PatchMerging(nn.Module): r""" Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ x: B, H*W, C """ H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." x = x.view(B, H, W, C) x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C x = self.norm(x) x = self.reduction(x) return x def extra_repr(self) -> str: return f"input_resolution={self.input_resolution}, dim={self.dim}" def flops(self): H, W = self.input_resolution flops = H * W * self.dim flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim return flops class BasicLayer(nn.Module): """ A basic Swin Transformer layer for one stage. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resolution. depth (int): Number of blocks. num_heads (int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
""" def __init__(self, dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): super().__init__() self.dim = dim self.input_resolution = input_resolution self.depth = depth self.use_checkpoint = use_checkpoint # build blocks self.blocks = nn.ModuleList([ SwinTransformerBlock(dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) else: self.downsample = None def forward(self, x): for blk in self.blocks: if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if self.downsample is not None: x = self.downsample(x) return x def extra_repr(self) -> str: return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" def flops(self): flops = 0 for blk in self.blocks: flops += blk.flops() if self.downsample is not None: flops += self.downsample.flops() return flops class PatchEmbed(nn.Module): r""" Image to Patch Embedding Args: img_size (int): Image size. Default: 224. patch_size (int): Patch token size. Default: 4. in_chans (int): Number of input image channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] self.img_size = img_size self.patch_size = patch_size self.patches_resolution = patches_resolution self.num_patches = patches_resolution[0] * patches_resolution[1] self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, x): B, C, H, W = x.shape # FIXME look at relaxing size constraints assert H == self.img_size[0] and W == self.img_size[1], \ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C if self.norm is not None: x = self.norm(x) return x def flops(self): Ho, Wo = self.patches_resolution flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) if self.norm is not None: flops += Ho * Wo * self.embed_dim return flops class SwinTransformer(nn.Module): r""" Swin Transformer A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Args: img_size (int | tuple(int)): Input image size. Default 224 patch_size (int | tuple(int)): Patch size. Default: 4 in_chans (int): Number of input image channels. Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 embed_dim (int): Patch embedding dimension. Default: 96 depths (tuple(int)): Depth of each Swin Transformer layer. 
num_heads (tuple(int)): Number of attention heads in different layers. window_size (int): Window size. Default: 7 mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None drop_rate (float): Dropout rate. Default: 0 attn_drop_rate (float): Attention dropout rate. Default: 0 drop_path_rate (float): Stochastic depth rate. Default: 0.1 norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. ape (bool): If True, add absolute position embedding to the patch embedding. Default: False patch_norm (bool): If True, add normalization after patch embedding. Default: True use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False """ def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.ape = ape self.patch_norm = patch_norm self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) num_patches = self.patch_embed.num_patches patches_resolution = self.patch_embed.patches_resolution self.patches_resolution = patches_resolution # absolute position embedding if self.ape: self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) trunc_normal_(self.absolute_pos_embed, std=.02) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # build layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer), input_resolution=(patches_resolution[0] // (2 ** i_layer), patches_resolution[1] // (2 ** i_layer)), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layer) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'absolute_pos_embed'} @torch.jit.ignore def no_weight_decay_keywords(self): return {'relative_position_bias_table'} def forward_features(self, x): x = self.patch_embed(x) if self.ape: x = x + self.absolute_pos_embed x = self.pos_drop(x) for layer in self.layers: x = layer(x) x = 
self.norm(x) # B L C x = self.avgpool(x.transpose(1, 2)) # B C 1 x = torch.flatten(x, 1) return x def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def flops(self): flops = 0 flops += self.patch_embed.flops() for i, layer in enumerate(self.layers): flops += layer.flops() flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers) flops += self.num_features * self.num_classes return flops def swin_transformer(**kwargs): net = SwinTransformer(**kwargs) return net if __name__ == "__main__": net = swin_transformer(img_size=224, patch_size=4, in_chans=3, num_classes=5) from torchsummary import summary import os os.environ['CUDA_VISIBLE_DEVICES'] = '2' net = net.cuda() summary(net,input_size=(3,224,224),batch_size=1,device='cuda')
the-stack_0_8179
# coding=utf8
import numpy as np


def rerec(bbox):
    '''
    Convert bounding boxes to squares by padding the shorter side of each box
    around its centre; the array is modified in place and also returned.

    :param bbox: ndarray of shape (N, 4) holding the two corner coordinates of
                 each box (side lengths are taken from columns 0/2 and 1/3)
    :return: the same ndarray with every box expanded to a square
    '''
    h = bbox[:, 2] - bbox[:, 0] + 1
    w = bbox[:, 3] - bbox[:, 1] + 1
    max_l = np.maximum(h, w)

    bbox[:, 0] = np.round(bbox[:, 0] + (h - max_l) * 0.5)
    bbox[:, 1] = np.round(bbox[:, 1] + (w - max_l) * 0.5)
    bbox[:, 2] = bbox[:, 0] + max_l - 1
    bbox[:, 3] = bbox[:, 1] + max_l - 1
    return bbox
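

# Illustrative usage sketch (not part of the original module; the sample box is
# hypothetical). With side lengths 41 and 21, both sides become 41 after rerec.
if __name__ == "__main__":
    sample = np.array([[10.0, 20.0, 50.0, 40.0]])
    print(rerec(sample.copy()))  # -> [[10. 10. 50. 50.]]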
the-stack_0_8182
import os from binascii import unhexlify import pytest from cose.algorithms import EdDSA from cose.keys.curves import Ed448, Ed25519, X448, X25519 from cose.exceptions import CoseInvalidKey, CoseIllegalKeyType, CoseUnsupportedCurve, CoseException, CoseIllegalKeyOps from cose.keys import OKPKey, CoseKey from cose.keys.keyops import SignOp, MacVerifyOp from cose.keys.keyparam import KpKty, OKPKpCurve, OKPKpX, OKPKpD, KpAlg, KpKeyOps ############################################################### # OKP key checks ############################################################### from cose.keys.keytype import KtyOKP, KtyEC2, KtySymmetric def _is_valid_okp_key(key: OKPKey): check1 = (KpKty in key and OKPKpCurve in key) and (OKPKpX in key or OKPKpD in key) check2 = key[OKPKpCurve] in [X25519, X448, Ed25519, Ed448] return check2 and check1 @pytest.mark.parametrize('kty_attr, kty_value', [(KpKty, KtyOKP), ('KTY', 'OKP'), (1, 1), (KpKty, 'OKP'), (KpKty, 1), ('KTY', KtyOKP), ('KTY', 1), (1, KtyOKP), (1, 'OKP')]) @pytest.mark.parametrize('crv_attr, crv_value', [(OKPKpCurve, X25519), ('CURVE', X25519), (-1, X25519)]) @pytest.mark.parametrize('x_attr, x_value', [(OKPKpX, os.urandom(32)), ('X', os.urandom(32)), (-2, os.urandom(32))]) @pytest.mark.parametrize('d_attr, d_value', [(OKPKpD, os.urandom(32)), ('D', os.urandom(32)), (-4, os.urandom(32))]) def test_okp_keys_from_dicts(kty_attr, kty_value, crv_attr, crv_value, x_attr, x_value, d_attr, d_value): # The public and private values used in this test do not form a valid elliptic curve key, # but we don't care about that here d = {kty_attr: kty_value, crv_attr: crv_value, x_attr: x_value, d_attr: d_value} cose_key = CoseKey.from_dict(d) assert _is_valid_okp_key(cose_key) @pytest.mark.parametrize('kty_attr, kty_value', [(KpKty, KtyOKP), ('KTY', 'OKP'), (1, 1)]) @pytest.mark.parametrize('crv_attr, crv_value', [(OKPKpCurve, Ed25519)]) @pytest.mark.parametrize('d_attr, d_value', [(OKPKpD, os.urandom(32)), ('D', os.urandom(32)), (-4, os.urandom(32))]) def test_okp_private_key_from_dicts(kty_attr, kty_value, crv_attr, crv_value, d_attr, d_value): # The public and private values used in this test do not form a valid elliptic curve key, # but we don't care about that here d = {kty_attr: kty_value, crv_attr: crv_value, d_attr: d_value} cose_key = CoseKey.from_dict(d) assert _is_valid_okp_key(cose_key) @pytest.mark.parametrize('kty_attr, kty_value', [(KpKty, KtyOKP), ('KTY', 'OKP'), (1, 1)]) @pytest.mark.parametrize('crv_attr, crv_value', [(OKPKpCurve, Ed448), ('CURVE', Ed448), (-1, Ed448)]) @pytest.mark.parametrize('x_attr, x_value', [(OKPKpX, os.urandom(32)), ('X', os.urandom(32)), (-2, os.urandom(32))]) def test_okp_public_keys_from_dicts(kty_attr, kty_value, crv_attr, crv_value, x_attr, x_value): # The public and private values used in this test do not form a valid elliptic curve key, # but we don't care about that here d = {kty_attr: kty_value, crv_attr: crv_value, x_attr: x_value} cose_key = CoseKey.from_dict(d) assert _is_valid_okp_key(cose_key) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448, 4, 'X25519', 'X448']) def test_okp_key_generation_encoding_decoding(crv): trails = 256 for i in range(trails): okp_test = OKPKey.generate_key(crv=crv) okp_encoded = okp_test.encode() okp_decoded = CoseKey.decode(okp_encoded) assert _is_valid_okp_key(okp_decoded) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448, 'X25519', 4, 5]) def test_okp_key_generation(crv): key = OKPKey.generate_key(crv) assert _is_valid_okp_key(key) 
@pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448]) def test_okp_key_construction(crv): key = OKPKey(crv=crv, x=os.urandom(32), d=os.urandom(32), optional_params={'ALG': 'EDDSA'}) assert _is_valid_okp_key(key) serialized = key.encode() _ = CoseKey.decode(serialized) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448]) def test_fail_on_missing_key_values(crv): with pytest.raises(CoseInvalidKey) as excinfo: _ = OKPKey(crv=crv) assert "Either the public values or the private value must be specified" in str(excinfo.value) def test_fail_on_missing_crv_attr(): cose_key = {KpKty: KtyOKP, OKPKpX: os.urandom(32), OKPKpD: os.urandom(32)} with pytest.raises(CoseInvalidKey) as excinfo: _ = CoseKey.from_dict(cose_key) assert "COSE curve cannot be None" in str(excinfo.value) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448]) @pytest.mark.parametrize('kty', [KtyEC2, KtySymmetric, 2, 4]) def test_fail_on_illegal_kty(crv, kty): params = {KpKty: kty} with pytest.raises(CoseIllegalKeyType) as excinfo: _ = OKPKey(crv=crv, x=os.urandom(32), d=os.urandom(32), optional_params=params) assert "Illegal key type in OKP COSE Key" in str(excinfo.value) def test_remove_empty_keyops_list(): cose_key = {KpKty: KtyOKP, OKPKpD: os.urandom(16), KpAlg: EdDSA, OKPKpCurve: Ed25519, KpKeyOps: []} key = CoseKey.from_dict(cose_key) assert KpKeyOps not in key def test_existing_non_empty_keyops_list(): cose_key = {KpKty: KtyOKP, OKPKpD: os.urandom(16), KpAlg: EdDSA, OKPKpCurve: Ed448, KpKeyOps: [SignOp]} key = CoseKey.from_dict(cose_key) assert KpKeyOps in key def test_key_ops_setter_getter(): key = OKPKey.generate_key('ED25519') key.key_ops = [SignOp] assert SignOp in key.key_ops with pytest.raises(CoseIllegalKeyOps) as excinfo: key.key_ops = [MacVerifyOp] assert "Invalid COSE key operation" in str(excinfo) def test_dict_operations_on_okp_key(): cose_key = {KpKty: KtyOKP, OKPKpD: os.urandom(16), KpAlg: EdDSA, OKPKpCurve: Ed448, KpKeyOps: [SignOp]} key = CoseKey.from_dict(cose_key) assert KpKty in key assert OKPKpD in key assert OKPKpX not in key assert 1 in key assert -4 in key assert KpAlg in key assert 'ALG' in key def test_unknown_key_attributes(): key = 'a401012004215820a3ff263595beb377d1a0ce1d04dad2d40966ac6bcb622051b84659184d5d9a326c7375626a656374206e616d6560' key = CoseKey.decode(unhexlify(key)) assert "subject name" in key def test_key_set_curve(): key = 'a401012006215820898ff79a02067a16ea1eccb90fa52246f5aa4dd6ec076bba0259d904b7ec8b0c2358208f781a095372f85b6d' \ '9f6109ae422611734d7dbfa0069a2df2935bb2e053bf35' key = CoseKey.decode(unhexlify(key)) assert key.crv == Ed25519 key.crv = X25519 assert key.crv == X25519 with pytest.raises(CoseUnsupportedCurve) as excinfo: key.crv = 3 # P-521 assert "Invalid COSE curve" in str(excinfo.value) key.crv = X448.identifier assert key.crv == X448 def test_key_generation_with_optional_parameters(): key = OKPKey.generate_key(crv='ED25519', optional_params={'KpKid': 4})
the-stack_0_8185
if __name__ == '__main__': from setuptools import setup, Extension _synctex_parser = Extension('pysynctex._synctex_parser', sources=['wrapper/synctex_parser.i', 'wrapper/synctex_package/synctex_parser.c', 'wrapper/synctex_package/synctex_parser_utils.c'], include_dirs=['wrapper/synctex_package']) setup(name='PySyncTeX', version='0.2.0', author='Jan Kumor', author_email='[email protected]', description='Python wrapper for SyncTeX parser C library.', long_description=open('README.rst').read(), url='https://github.com/elohhim/PySyncTeX', license="MIT", platforms='ANY', packages=['pysynctex'], ext_modules=[_synctex_parser], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX :: Linux', 'Natural Language :: English', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Text Processing :: Markup :: LaTeX', ] )
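
# Typical build/install sequence (a general setuptools invocation shown as a
# hint rather than project documentation; building the extension requires SWIG
# for the wrapper/synctex_parser.i interface file):
#   python setup.py build_ext
#   python setup.py install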
the-stack_0_8189
#!/usr/bin/env python3 import os, sys service = "[Unit]\n"\ "Description={description}\n"\ "After=network.target\n"\ "StartLimitIntervalSec=0\n"\ "\n"\ "[Service]\n"\ "Type=simple\n"\ "Restart=always\n"\ "RestartSec=1\n"\ "User=root\n"\ "ExecStart={exec}\n"\ "\n"\ "[Install]\n"\ "WantedBy=multi-user.target" name = False desc = False path = False command = False for arg in sys.argv: if "--name=" in arg: name = arg.split('=')[1] + ".service" if "--path=" in arg: path = arg.split('=')[1] if "--command=" in arg: command = arg.split('=')[1] if "--desc=" in arg: desc = arg.split('=')[1] if arg == "-h" or arg == "--help": print("Usage: python3 createservice.py [--name=NAME] [--path=PATH] [--command=COMMAND] [--desc=DESC]") exit(0) if not name: name = input("Service name: ") + ".service" if not path: path = input("Executable binary path: ") if not command: command = input("Command and args: ") if not desc: desc = input("Description: ") service = service.replace("{description}", desc).replace("{exec}", path + " " + command) f = open("/lib/systemd/system/" + name, "w") f.write(service) f.close() print(service) print() print("Wrote to /lib/systemd/system/" + name) os.system("systemctl enable " + name) os.system("systemctl start " + name) print("Started and enabled service.")
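
# Example invocation (illustrative only; the service name, binary path and
# command below are placeholders, not values taken from this repository):
#   sudo python3 createservice.py --name=myapp --path=/usr/bin/python3 \
#       --command="/opt/myapp/app.py --port 8080" --desc="My app service"
# This writes /lib/systemd/system/myapp.service with ExecStart set to the given
# path plus command, then enables and starts the unit via systemctl as above.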
the-stack_0_8191
from distutils.core import setup with open("README.md", "r") as fh: long_description = fh.read() setup( name="GPGame", version="2020.0.2", author="Nishant Vikramaditya", author_email="[email protected]", description="An abstraction layer on the Kivy GPU accelerated engine.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/Nv7-GitHub/GPGame", packages=["GPGame"], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=3.6', install_requires=["Kivy"] )
the-stack_0_8194
import tempfile, time, sys import pymailer f = tempfile.NamedTemporaryFile('r+t', suffix='.html', delete=True) f.write('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\ <html lang="fr">\ <head>\ <meta http-equiv="content-type" content="text/html;charset=utf-8" />\ </head>\ <body>\ <p>The computer has been turned on, on ') #print(f.name + ' created') arg = ['-s', f.name, '/home/romain/git/python-mailer/recipients.csv', 'Computer Turned On'] f.write((time.strftime("%A %d %B %Y, %H:%M:%S"))) f.write('.</p>\ </body>\ </html>') f.seek(0) # return to beginning of file pymailer.main(arg) f.close() # temporary file is automatically deleted here
the-stack_0_8196
# Copyright (c) 2004 Divmod. # See LICENSE for details. import urllib.request, urllib.parse, urllib.error, warnings from twisted.python import log, failure from nevow import util from nevow.stan import directive, Unset, invisible, _PrecompiledSlot from nevow.inevow import ICanHandleException, IData, IMacroFactory, IRenderer, IRendererFactory from nevow.flat import precompile, serialize from nevow.accessors import convertToData from nevow.context import WovenContext from nevow.util import toBytes, unicode allowSingleton = ('img', 'br', 'hr', 'base', 'meta', 'link', 'param', 'area', 'input', 'col', 'basefont', 'isindex', 'frame') def ProtoSerializer(original, context): return '<%s />' % original def _datacallback(result, context): context.remember(result, IData) return '' def TagSerializer(original, context, contextIsMine=False): """ Original is the tag. Context is either: - the context of someone up the chain (if contextIsMine is False) - this tag's context (if contextIsMine is True) """ # print "TagSerializer:",original, "ContextIsMine",contextIsMine, "Context:",context visible = bool(original.tagName) if visible and context.isAttrib: raise RuntimeError("Tried to render tag '%s' in an tag attribute context." % (original.tagName)) if context.precompile and original.macro: toBeRenderedBy = original.macro ## Special case for directive; perhaps this could be handled some other way with an interface? if isinstance(toBeRenderedBy, directive): toBeRenderedBy = IMacroFactory(context).macro(context, toBeRenderedBy.name) original.macro = Unset newContext = WovenContext(context, original) yield serialize(toBeRenderedBy(newContext), newContext) return ## TODO: Do we really need to bypass precompiling for *all* specials? ## Perhaps just render? if context.precompile and ( [x for x in list(original._specials.values()) if x is not None and x is not Unset] or original.slotData): ## The tags inside this one get a "fresh" parent chain, because ## when the context yielded here is serialized, the parent ## chain gets reconnected to the actual parents at that ## point, since the render function here could change ## the actual parentage hierarchy. nestedcontext = WovenContext(precompile=context.precompile, isAttrib=context.isAttrib) # If necessary, remember the MacroFactory onto the new context chain. macroFactory = IMacroFactory(context, None) if macroFactory is not None: nestedcontext.remember(macroFactory, IMacroFactory) original = original.clone(deep=False) if not contextIsMine: context = WovenContext(context, original) context.tag.children = precompile(context.tag.children, nestedcontext) yield context return ## Don't render patterns if original.pattern is not Unset and original.pattern is not None: return if not contextIsMine: if original.render: ### We must clone our tag before passing to a render function original = original.clone(deep=False) context = WovenContext(context, original) if original.data is not Unset: newdata = convertToData(original.data, context) if isinstance(newdata, util.Deferred): yield newdata.addCallback(lambda newdata: _datacallback(newdata, context)) else: _datacallback(newdata, context) if original.render: ## If we have a render function we want to render what it returns, ## not our tag toBeRenderedBy = original.render # erase special attribs so if the renderer returns the tag, # the specials won't be on the context twice. 
original._clearSpecials() yield serialize(toBeRenderedBy, context) return if not visible: for child in original.children: yield serialize(child, context) return yield '<%s' % original.tagName if original.attributes: attribContext = WovenContext(parent=context, precompile=context.precompile, isAttrib=True) for (k, v) in sorted(original.attributes.items()): if v is None: continue yield ' %s="' % k yield serialize(v, attribContext) yield '"' if not original.children: if original.tagName in allowSingleton: yield ' />' else: yield '></%s>' % original.tagName else: yield '>' for child in original.children: yield serialize(child, context) yield '</%s>' % original.tagName def EntitySerializer(original, context): if original.name in ['amp', 'gt', 'lt', 'quot']: return '&%s;' % original.name return '&#%s;' % original.num def _jsSingleQuoteQuote(quotable): return quotable.replace( "\\", "\\\\").replace( "'", r"\'").replace( "\n", "\\n").replace( "\r", "\\r") def RawSerializer(original, context): if context.inJSSingleQuoteString: return _jsSingleQuoteQuote(original) return original def StringSerializer(original, context): # Quote the string as necessary. URLs need special quoting - only # alphanumeric and a few punctation characters are valid. # Otherwise we use normal XML escaping rules but also replacing " # in an attribute because Nevow always uses "..." for values. original=toBytes(original) if context.inURL: # The magic string "-_.!*'()" also appears in url.py. Thinking about # changing this? Change that, too. return urllib.parse.quote(original, safe="-_.!*'()") ## quote it if context.inJS: original = _jsSingleQuoteQuote(original) if not context.inJSSingleQuoteString: original = b"'%s'" % (original, ) if context.isAttrib: return original.replace(b"&", b"&amp;").replace(b"<", b"&lt;").replace(b">", b"&gt;").replace(b'"', b"&quot;") elif context.inJS: return original else: return original.replace(b"&", b"&amp;").replace(b"<", b"&lt;").replace(b">", b"&gt;") def NoneWarningSerializer(original, context): if context.isAttrib: ## We don't want the big red None warning inside a html attribute. Just leave it blank. 
return b'' elif context.inURL: return b'' elif context.inJS: return b'' return b'<span style="font-size: xx-large; font-weight: bold; color: red; border: thick solid red;">None</span>' def StringCastSerializer(original, context): if context.inJS: return str(original) return StringSerializer(str(original), context) def BooleanSerializer(original, context): if context.inJS: if original: return b'true' return b'false' return str(original) def ListSerializer(original, context): for item in original: yield serialize(item, context) def XmlSerializer(original, context): return original.content PASS_SELF = object() def FunctionSerializer_nocontext(original): code = getattr(original, 'func_code', None) if code is None: return True argcount = code.co_argcount if argcount == 1: return True if argcount == 3: return PASS_SELF return False def FunctionSerializer(original, context, nocontextfun=FunctionSerializer_nocontext): if context.precompile: return WovenContext(tag=invisible(render=original)) else: data = convertToData(context.locate(IData), context) try: nocontext = nocontextfun(original) if nocontext is True: if hasattr(original, '__code__') and (original.__code__.co_argcount == 3 or ( original.__code__.co_argcount == 2 and original.__code__.co_varnames[0] != 'self')): result = original(context, data) else: result = original(data) else: if nocontext is PASS_SELF: renderer = context.locate(IRenderer) result = original(renderer, context, data) else: result = original(context, data) except StopIteration: raise RuntimeError("User function %r raised StopIteration." % original) return serialize(result, context) def MethodSerializer(original, context): def nocontext(original): func = getattr(original, 'im_func', None) code = getattr(func, 'func_code', None) return code is None or code.co_argcount == 2 return FunctionSerializer(original, context, nocontext) def RendererSerializer(original, context): def nocontext(original): func = getattr(original, 'im_func', None) code = getattr(func, 'func_code', None) return code is None or code.co_argcount == 2 return FunctionSerializer(original.rend, context, nocontext) def DirectiveSerializer(original, context): if context.precompile: return original rendererFactory = context.locate(IRendererFactory) renderer = rendererFactory.renderer(context, original.name) return serialize(renderer, context) def SlotSerializer(original, context): """ Serialize a slot. If the value is already available in the given context, serialize and return it. Otherwise, if this is a precompilation pass, return a new kind of slot which captures the current render context, so that any necessary quoting may be performed. Otherwise, raise an exception indicating that the slot cannot be serialized. """ if context.precompile: try: data = context.locateSlotData(original.name) except KeyError: return _PrecompiledSlot( original.name, precompile(original.children, context), original.default, context.isAttrib, context.inURL, context.inJS, context.inJSSingleQuoteString, original.filename, original.lineNumber, original.columnNumber) else: return serialize(data, context) try: data = context.locateSlotData(original.name) except KeyError: if original.default is None: raise data = original.default return serialize(data, context) def PrecompiledSlotSerializer(original, context): """ Serialize a pre-compiled slot. Return the serialized value of the slot or raise a KeyError if it has no value. 
""" # Precompilation should _not_ be happening at this point, but Nevow is very # sloppy about precompiling multiple times, so sometimes we are in a # precompilation context. In this case, there is nothing to do, just # return the original object. The case which seems to exercise this most # often is the use of a pattern as the stan document given to the stan # loader. The pattern has already been precompiled, but the stan loader # precompiles it again. This case should be eliminated by adding a loader # for precompiled documents. if context.precompile: warnings.warn( "[v0.9.9] Support for multiple precompilation passes is deprecated.", PendingDeprecationWarning) return original try: data = context.locateSlotData(original.name) except KeyError: if original.default is None: raise data = original.default originalContext = context.clone(deep=False) originalContext.isAttrib = original.isAttrib originalContext.inURL = original.inURL originalContext.inJS = original.inJS originalContext.inJSSingleQuoteString = original.inJSSingleQuoteString return serialize(data, originalContext) def ContextSerializer(original, context): """ Serialize the given context's tag in that context. """ originalContext = original.clone(deep=False) originalContext.precompile = context and context.precompile or False if originalContext.parent is not None: originalContext.parent = originalContext.parent.clone(cloneTags=False) originalContext.chain(context) try: return TagSerializer(originalContext.tag, originalContext, contextIsMine=True) except: f = failure.Failure() handler = context.locate(ICanHandleException) if handler: return handler.renderInlineError(context, f) else: log.err(f) return """<div style="border: 1px dashed red; color: red; clear: both">[[ERROR]]</div>""" def CommentSerializer(original, context): yield "<!--" for x in original.children: yield serialize(x, context) yield "-->" def DocFactorySerializer(original, ctx): """Serializer for document factories. """ return serialize(original.load(ctx), ctx) def FailureSerializer(original, ctx): from nevow import failure return serialize(failure.formatFailure(original), ctx)
the-stack_0_8197
#!/usr/bin/env python3 ################################################################################# # The MIT License (MIT) # # Copyright (c) 2015, George Webster. All rights reserved. # # Approved for Public Release; Distribution Unlimited 14-1511 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ################################################################################# import argparse import configparser import csv import logging import requests import sys import time from collections import namedtuple from itertools import islice def submit_crits(domain, cfg): """ Submits domain to CRITs """ headers = {'User-agent': 'benign_domains'} # submit domain url = "{0}/api/v1/domains/".format(cfg['crits'].get('url')) params = { 'api_key': cfg['crits'].get('key'), 'username': cfg['crits'].get('user'), 'source': cfg['crits'].get('source'), 'domain': domain } try: response = requests.post(url, headers=headers, data=params, verify=False) if response.status_code == requests.codes.ok: response_json = response.json() logging.info("\tSubmitted domain info for {0} to Crits, response was {1}".format(domain, response_json.get('message', ''))) except: logging.info("Exception caught from Crits when submitting domain {0}".format(domain)) def check_virustotal(domain, api_key, threshold): """ Checks VirusTotal to see if the domain is malicious """ #resource = "{0}domain".format("http://www.", domain) url = 'https://www.virustotal.com/vtapi/v2/url/report' params = {'resource': domain, 'apikey': api_key, 'allinfo': 1} try: response = requests.get(url, params=params) if response.status_code == requests.codes.ok: response_json = response.json() logging.info("\tSubmitted domain {0} to VirusTotal for verification, response was {1}".format(domain, response_json.get('verbose_msg', ''))) if response_json['response_code'] == 0: logging.info("\tVT: Has not seen {0} before, assuming domain is benign".format(domain)) return True elif response_json['response_code'] == -1: logging.debug("\tVT: Reporting that domain {0} is malformed, assuming malicious".format(domain)) return False elif response_json['response_code'] == 1: total = int(response_json.get('total', 0)) positive = int(response_json.get('positives', 0)) additionalinfo = response_json.get('additional_info', '') if additionalinfo: logging.info("\tVT: Category is: {0}".format(additionalinfo.get('categories', ''))) logging.info("\tVT: Positive scans: {0} out of {1} total scans".format(positive, total)) if positive > int(threshold): logging.info("\tVT: Threshold exceeded, 
skipping domain") return False else: logging.info("\tVT: Under threshold, domain is benign") return True except: logging.debug("Exception caught from VirusTotal when receiving report") return False def setup_cli(args, cfg): """ Configure command-line arguements """ description =""" Benign_domains outputs a list of preceived benign domains. This is intended to help gather data for ML training sets and generate white lists. The core set of domains are provided by majestic million. Options: - Validate domains against VirusTotal's datasets (in progress) - Submit domains to a CRITs instance - Output to a file""" parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-s', '--start', action='store', default=cfg['benign'].get('startDomain', fallback='0'), dest='start', type=int, help='Define starting domain rank number. Overrides config file') parser.add_argument('-e', '--end', action='store', default=cfg['benign'].get('endDomain', fallback='200'), dest='end', type=int, help='Define ending domain rank number. Overrides config file') return parser.parse_args(args) def main(): """ Main logic for program """ print("Starting up benign_domain parsing script!!!") # Read configuration file cfg = configparser.ConfigParser() cfg.read('benign.cfg') # Set up CLI interface args = setup_cli(sys.argv[1:], cfg) # Set up logging functionality logfile = cfg['logging'].get('filename', fallback='benign.log') level = cfg['logging'].get('level', fallback='INFO').upper() logformat = '%(asctime)s %(message)s' logging.basicConfig(filename=logfile, level=level, format=logformat) print("Writing to log file {0} at level {1}.".format(logfile, level)) inputFile = cfg['inputFile'].get('majestic', fallback='majestic_million.csv') print("Opening input file {0}.".format(inputFile)) print("Starting processing at domain {0}".format(args.start)) print("Ending processing at domain {0}".format(args.end)) if cfg['benign'].getboolean('outputFile', fallback=True): outputFile = cfg['outputFile'].get('filename', fallback='benign.domains') print("Saving output to file {0}.".format(outputFile)) if cfg['benign'].getboolean('submitToCrits', fallback=False): url = cfg['crits'].get('url', '') username = cfg['crits'].get('user', '') source = cfg['crits'].get('source', '') print("Submitting domains to CRITs at: \n\tURL: {0}\n\tUser: {1}\n\tSource: {2}".format(url, username, source)) # Quick checks before entering the loop if args.start == 0: args.start = 1 if args.start > args.end: print("Starting # must be greater then ending #.\nExiting") sys.exit() if int(cfg['virustotal'].get('threshold', 0)) < 1: print("Threshold must be greater then 0, setting to 1") cfg['virustotal']['threshold'] = 1 print("\nResults:\n--------------------------------------------------------------") with open(inputFile) as infile: f_csv = csv.reader(infile) headings = next(f_csv) Row = namedtuple('Row', headings) for r in islice(f_csv, args.start - 1, args.end): row = Row(*r) print("Processing domain: {0} at position: {1}".format(row.Domain, f_csv.line_num - 1)) logging.info("Processing domain: {0} at position: {1}".format(row.Domain, f_csv.line_num - 1)) if cfg['benign'].getboolean('checkVirustotal', fallback=False): if not check_virustotal(row.Domain, cfg['virustotal'].get('key'), cfg['virustotal'].get('threshold')): continue if cfg['benign'].getboolean('outputFile', fallback=True): outputFile = cfg['outputFile'].get('filename', fallback='benign.domains') logging.info("\tWriting domain {0} to 
file {1}".format(row.Domain, outputFile)) with open(outputFile, 'at') as f: f.write(row.Domain + "\n") #print(row.Domain, file=f) if cfg['benign'].getboolean('submitToCrits', fallback=False): submit_crits(row.Domain, cfg) time.sleep(float(cfg['benign'].get('wait', fallback='1.0'))) if __name__ == "__main__": try: main() except KeyboardInterrupt: sys.exit()
the-stack_0_8198
# Copyright (c) 2008, Humanized, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Enso nor the names of its contributors may # be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- # # enso # # ---------------------------------------------------------------------------- import logging class EventResponderList(object): """ Behaves like a dictionary with limited functionality. When it become non-empty, an event handler is registered for a particular event and called whenever the event occurs. When the it's empty, the event handler is unregistered and will not be called until it becomes non-empty again. """ def __init__(self, eventManager, eventName, responderFunc): self.__eventManager = eventManager self.__eventName = eventName self.__responderFunc = responderFunc self.__isRegistered = False self.__items = {} def __setitem__(self, key, value): """ if (not isinstance(item, slice) or not (item.start is None and item.stop is None)): raise NotImplementedError() """ self.__items[key] = value self.__onItemsChanged() def __delitem__(self, key): del self.__items[key] self.__onItemsChanged() def __iter__(self): for key, item in self.__items.items(): yield key, item def __onItemsChanged(self): if self.__items and (not self.__isRegistered): assert logging.debug( "Registering EventResponderList for onTimer event") or True self.__eventManager.registerResponder( self.__responderFunc, self.__eventName ) self.__isRegistered = True elif self.__isRegistered and (not self.__items): assert logging.debug( "Removing EventResponderList for onTimer event") or True self.__eventManager.removeResponder(self.__responderFunc) self.__isRegistered = False def fromlist(self, lst): self.__items = dict((id(item), item) for item in lst) self.__onItemsChanged() def clear(self): self.__items.clear() self.__onItemsChanged()
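

# Minimal usage sketch (illustrative; the stub event manager below is not
# Enso's real one, it merely exposes the registerResponder/removeResponder
# methods this class relies on):
if __name__ == "__main__":
    class _StubEventManager(object):
        def registerResponder(self, responderFunc, eventName):
            print("registered responder for %r" % eventName)

        def removeResponder(self, responderFunc):
            print("removed responder")

    def _onTimer():
        pass

    responders = EventResponderList(_StubEventManager(), "onTimer", _onTimer)
    responders["key"] = object()   # becoming non-empty registers the responder
    del responders["key"]          # becoming empty unregisters it again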
the-stack_0_8200
""" Author: <REPLACE> Project: 100DaysPython File: module3_day29_fileManipulations.py Creation Date: <REPLACE> Description: <REPLACE> """ import os # First change the working directory to point to the folder containing the files os.chdir("./audio") # The `.listdir()` function populates the contents of the folder. This can be iterated over to work with the files. for file in os.listdir(): # The files contain the format `title_module_day_track.mp3` # The `os.path.splitext()` function separates the extension from the file name. This can be used to create a tuple # of the file name and the file extension. file_name, file_ext = os.path.splitext(file) # Since the folder can contain files other than `.mp3`, the program will be told to ignore all other extensions. if file_ext != ".mp3": continue # Similar to the method of splitting off the file extension, the title, module, day, and track can all be separated # into a tuple by splitting on the underscore. title, module, day, track = file_name.split("_") # The track number includes the number sign which isn't ideal and needs to be removed. Additionally, since there are # tracks in the double digits, the system will sort track 10 immediately after track 1. Therefore, padding also # needs to be applied using the `.zfill()` method to ensure proper order. track = track[1:].zfill(2) # The `.rename()` function can then be used to rename the file with the desired format. new_name = f"{track}-{title}{file_ext}" os.rename(file, new_name)
the-stack_0_8201
#!/usr/bin/python import datetime from transformers import TFBertForSequenceClassification import tensorflow as tf from tensorflow.keras import Input from tensorflow.keras import backend as K, initializers, regularizers, constraints from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Layer, Dropout, LSTM, Dense, InputLayer from tensorflow.keras.losses import Loss class Attention(Layer): """ SOURCE: https://gist.github.com/cbaziotis/6428df359af27d58078ca5ed9792bd6d """ def __init__(self, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, return_attention=False, **kwargs): """ Keras Layer that implements an Attention mechanism for temporal data. Supports Masking. Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756] # Input shape 3D tensor with shape: `(samples, steps, features)`. # Output shape 2D tensor with shape: `(samples, features)`. :param kwargs: Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True. The dimensions are inferred based on the output shape of the RNN. Note: The layer has been tested with Keras 1.x Example: # 1 model.add(LSTM(64, return_sequences=True)) model.add(Attention()) # next add a Dense layer (for classification/regression) or whatever... # 2 - Get the attention scores hidden = LSTM(64, return_sequences=True)(words) sentence, word_scores = Attention(return_attention=True)(hidden) """ self.supports_masking = True self.return_attention = return_attention self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias super(Attention, self).__init__(**kwargs) def get_config(self): config = super().get_config().copy() config.update({ 'supports_masking': self.supports_masking, 'return_attention': self.return_attention, 'init': self.init, 'W_regularizer': self.W_regularizer, 'b_regularizer': self.b_regularizer, 'W_constraint': self.W_constraint, 'b_constraint': self.b_constraint, 'bias': self.bias, }) return config def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight(shape=(input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) if self.bias: self.b = self.add_weight(shape=(input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): # do not pass the mask to the next layers return None def call(self, x, mask=None): eij = dot_product(x, self.W) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) # apply mask after the exp. will be re-normalized next if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano a *= K.cast(mask, K.floatx()) # in some cases especially in the early stages of training the sum may be almost zero # and this results in NaN's. A workaround is to add a very small positive number ε to the sum. 
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) weighted_input = x * K.expand_dims(a) result = K.sum(weighted_input, axis=1) if self.return_attention: return [result, a] return result def compute_output_shape(self, input_shape): if self.return_attention: return [(input_shape[0], input_shape[-1]), (input_shape[0], input_shape[1])] else: return input_shape[0], input_shape[-1] class RankingError(Loss): def __init__(self, batch_size): super().__init__() self.batch_size = batch_size def call(self, y_true, y_diff): pos = tf.constant([1.0 for i in range(self.batch_size)]) neg = tf.constant([-1.0 for i in range(self.batch_size)]) sign = tf.where(tf.equal(y_true,1.0), pos, neg) return tf.math.maximum(0.0, 1.0 - sign * y_diff) def dot_product(x, kernel): """ SOURCE: https://gist.github.com/cbaziotis/6428df359af27d58078ca5ed9792bd6d Wrapper for dot product operation, in order to be compatible with both Theano and Tensorflow Args: x (): input kernel (): weights Returns: """ if K.backend() == 'tensorflow': # todo: check that this is correct return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1) else: return K.dot(x, kernel) def build_base_model(input_shape, hidden_units, dropout_prob, model_name='base'): model = Sequential(name=model_name) model.add(LSTM(hidden_units, input_shape=input_shape, return_sequences=True, name='lstm')) model.add(Attention(name='attention')) model.add(Dropout(dropout_prob)) model.add(Dense(1, activation='sigmoid', name='dense')) return model def build_ranking_model(base_forward_func, input1, input2): out_s1 = base_forward_func(input1) out_s1 = Layer(name='out_s1')(out_s1) out_s2 = base_forward_func(input2) out_diff = Layer(name='out_diff')(tf.math.subtract(out_s1, out_s2, name='out_diff')) if isinstance(input1, list) and isinstance(input2, list): total_inputs = input1 + input2 else: total_inputs = [input1] + [input2] return tf.keras.Model(inputs=total_inputs, outputs=[out_s1, out_diff], name='ranking') def load_bert_model(model_path): cbert_model = TFBertForSequenceClassification.from_pretrained(model_path) cbert_model.classifier.activation = tf.keras.activations.sigmoid return cbert_model
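

# Illustrative wiring sketch (hypothetical shapes and hyper-parameters, not from
# the original training code): the shared base model scores both inputs so the
# ranking head can be trained on the difference of the two scores.
if __name__ == "__main__":
    seq_len, feat_dim, batch_size = 50, 300, 32
    base = build_base_model(input_shape=(seq_len, feat_dim),
                            hidden_units=64, dropout_prob=0.3)
    s1 = Input(shape=(seq_len, feat_dim), name="s1")
    s2 = Input(shape=(seq_len, feat_dim), name="s2")
    ranker = build_ranking_model(base, s1, s2)
    ranker.compile(optimizer="adam",
                   loss={"out_s1": "binary_crossentropy",
                         "out_diff": RankingError(batch_size)})
    ranker.summary()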
the-stack_0_8203
from __future__ import annotations import inspect import re from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type, Union, cast import numpy as np from pandas._libs import ( Interval, Period, Timestamp, algos as libalgos, internals as libinternals, lib, writers, ) from pandas._libs.internals import BlockPlacement from pandas._libs.tslibs import conversion from pandas._typing import ArrayLike, Dtype, DtypeObj, Scalar, Shape from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( astype_dt64_to_dt64tz, astype_nansafe, can_hold_element, convert_scalar_for_putitemlike, find_common_type, infer_dtype_from, infer_dtype_from_scalar, maybe_downcast_numeric, maybe_downcast_to_dtype, maybe_promote, maybe_upcast, soft_convert_objects, ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_integer, is_list_like, is_object_dtype, is_re, is_re_compilable, is_sparse, pandas_dtype, ) from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( putmask_inplace, putmask_smart, putmask_without_repeat, ) from pandas.core.array_algos.replace import compare_or_regex_search, replace_regex from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, PandasArray, PandasDtype, TimedeltaArray, ) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexers import ( check_setitem_lengths, is_empty_indexer, is_exact_shape_match, is_scalar_indexer, ) import pandas.core.missing as missing from pandas.core.nanops import nanpercentile if TYPE_CHECKING: from pandas import Index from pandas.core.arrays._mixins import NDArrayBackedExtensionArray class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure Index-ignorant; let the container take care of that """ values: Union[np.ndarray, ExtensionArray] __slots__ = ["_mgr_locs", "values", "ndim"] is_numeric = False is_float = False is_datetime = False is_datetimetz = False is_timedelta = False is_bool = False is_object = False is_extension = False _can_hold_na = False _can_consolidate = True _validate_ndim = True @classmethod def _simple_new( cls, values: ArrayLike, placement: BlockPlacement, ndim: int ) -> Block: """ Fastpath constructor, does *no* validation """ obj = object.__new__(cls) obj.ndim = ndim obj.values = values obj._mgr_locs = placement return obj def __init__(self, values, placement, ndim: int): """ Parameters ---------- values : np.ndarray or ExtensionArray placement : BlockPlacement (or castable) ndim : int 1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame """ # TODO(EA2D): ndim will be unnecessary with 2D EAs self.ndim = self._check_ndim(values, ndim) self.mgr_locs = placement self.values = self._maybe_coerce_values(values) if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values): raise ValueError( f"Wrong number of items passed {len(self.values)}, " f"placement implies {len(self.mgr_locs)}" ) def _maybe_coerce_values(self, values): """ Ensure we have correctly-typed values. 
Parameters ---------- values : np.ndarray, ExtensionArray, Index Returns ------- np.ndarray or ExtensionArray """ return values def _check_ndim(self, values, ndim): """ ndim inference and validation. Infers ndim from 'values' if not provided to __init__. Validates that values.ndim and ndim are consistent if and only if the class variable '_validate_ndim' is True. Parameters ---------- values : array-like ndim : int or None Returns ------- ndim : int Raises ------ ValueError : the number of dimensions do not match """ if ndim is None: ndim = values.ndim if self._validate_ndim and values.ndim != ndim: raise ValueError( "Wrong number of dimensions. " f"values.ndim != ndim [{values.ndim} != {ndim}]" ) return ndim @property def _holder(self): """ The array-like that can hold the underlying values. None for 'Block', overridden by subclasses that don't use an ndarray. """ return None @property def _consolidate_key(self): return self._can_consolidate, self.dtype.name @property def is_view(self) -> bool: """ return a boolean if I am possibly a view """ values = self.values values = cast(np.ndarray, values) return values.base is not None @property def is_categorical(self) -> bool: return self._holder is Categorical @property def is_datelike(self) -> bool: """ return True if I am a non-datelike """ return self.is_datetime or self.is_timedelta def external_values(self): """ The array that Series.values returns (public attribute). This has some historical constraints, and is overridden in block subclasses to return the correct array (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray instead of proper extension array). """ return self.values def internal_values(self): """ The array that Series._values returns (internal values). """ return self.values def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ return PandasArray(self.values) def get_values(self, dtype: Optional[Dtype] = None): """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ if is_object_dtype(dtype): return self.values.astype(object) return self.values def get_block_values_for_json(self) -> np.ndarray: """ This is used in the JSON C code. """ # TODO(EA2D): reshape will be unnecessary with 2D EAs return np.asarray(self.values).reshape(self.shape) @property def fill_value(self): return np.nan @property def mgr_locs(self): return self._mgr_locs @mgr_locs.setter def mgr_locs(self, new_mgr_locs): if not isinstance(new_mgr_locs, libinternals.BlockPlacement): new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs) self._mgr_locs = new_mgr_locs def make_block(self, values, placement=None) -> Block: """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self.mgr_locs if self.is_extension: values = _block_shape(values, ndim=self.ndim) return make_block(values, placement=placement, ndim=self.ndim) def make_block_same_class(self, values, placement=None, ndim=None): """ Wrap given values in a block of same type as self. 
""" if placement is None: placement = self.mgr_locs if ndim is None: ndim = self.ndim return type(self)(values, placement=placement, ndim=ndim) def __repr__(self) -> str: # don't want to print out all of the items here name = type(self).__name__ if self.ndim == 1: result = f"{name}: {len(self)} dtype: {self.dtype}" else: shape = " x ".join(str(s) for s in self.shape) result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" return result def __len__(self) -> int: return len(self.values) def __getstate__(self): return self.mgr_locs.indexer, self.values def __setstate__(self, state): self.mgr_locs = libinternals.BlockPlacement(state[0]) self.values = state[1] self.ndim = self.values.ndim def _slice(self, slicer): """ return a slice of my values """ return self.values[slicer] def getitem_block(self, slicer, new_mgr_locs=None): """ Perform __getitem__-like, return result as block. As of now, only supports slices that preserve dimensionality. """ if new_mgr_locs is None: axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer new_mgr_locs = self.mgr_locs[axis0_slicer] elif not isinstance(new_mgr_locs, BlockPlacement): new_mgr_locs = BlockPlacement(new_mgr_locs) new_values = self._slice(slicer) if self._validate_ndim and new_values.ndim != self.ndim: raise ValueError("Only same dim slicing is allowed") return type(self)._simple_new(new_values, new_mgr_locs, self.ndim) @property def shape(self): return self.values.shape @property def dtype(self): return self.values.dtype def iget(self, i): return self.values[i] def set_inplace(self, locs, values): """ Modify block values in-place with new item value. Notes ----- `set` never creates a new array or new Block, whereas `setitem` _may_ create a new array and always creates a new Block. """ self.values[locs] = values def delete(self, loc) -> None: """ Delete given loc(-s) from block in-place. """ self.values = np.delete(self.values, loc, 0) self.mgr_locs = self.mgr_locs.delete(loc) def apply(self, func, **kwargs) -> List[Block]: """ apply the function to my values; return a block if we are not one """ with np.errstate(all="ignore"): result = func(self.values, **kwargs) return self._split_op_result(result) def reduce(self, func, ignore_failures: bool = False) -> List[Block]: # We will apply the function and reshape the result into a single-row # Block with the same mgr_locs; squeezing will be done at a higher level assert self.ndim == 2 try: result = func(self.values) except (TypeError, NotImplementedError): if ignore_failures: return [] raise if np.ndim(result) == 0: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] def _split_op_result(self, result) -> List[Block]: # See also: split_and_operate if is_extension_array_dtype(result) and result.ndim > 1: # TODO(EA2D): unnecessary with 2D EAs # if we get a 2D ExtensionArray, we need to split it into 1D pieces nbs = [] for i, loc in enumerate(self.mgr_locs): vals = result[i] block = self.make_block(values=vals, placement=[loc]) nbs.append(block) return nbs if not isinstance(result, Block): result = self.make_block(result) return [result] def fillna( self, value, limit=None, inplace: bool = False, downcast=None ) -> List[Block]: """ fillna on the block with the value. 
If we fail, then convert to ObjectBlock and try again """ inplace = validate_bool_kwarg(inplace, "inplace") mask = isna(self.values) mask = _extract_bool_array(mask) if limit is not None: limit = libalgos.validate_limit(None, limit=limit) mask[mask.cumsum(self.ndim - 1) > limit] = False if not self._can_hold_na: if inplace: return [self] else: return [self.copy()] if self._can_hold_element(value): nb = self if inplace else self.copy() putmask_inplace(nb.values, mask, value) # TODO: should be nb._maybe_downcast? return self._maybe_downcast([nb], downcast) # we can't process the value, but nothing to do if not mask.any(): return [self] if inplace else [self.copy()] # operate column-by-column def f(mask, val, idx): block = self.coerce_to_target_dtype(value) # slice out our block if idx is not None: # i.e. self.ndim == 2 block = block.getitem_block(slice(idx, idx + 1)) return block.fillna(value, limit=limit, inplace=inplace, downcast=None) return self.split_and_operate(None, f, inplace) def _split(self) -> List[Block]: """ Split a block into a list of single-column blocks. """ assert self.ndim == 2 new_blocks = [] for i, ref_loc in enumerate(self.mgr_locs): vals = self.values[slice(i, i + 1)] nb = self.make_block(vals, [ref_loc]) new_blocks.append(nb) return new_blocks def split_and_operate( self, mask, f, inplace: bool, ignore_failures: bool = False ) -> List[Block]: """ split the block per-column, and apply the callable f per-column, return a new block for each. Handle masking which will not change a block unless needed. Parameters ---------- mask : 2-d boolean mask f : callable accepting (1d-mask, 1d values, indexer) inplace : bool ignore_failures : bool, default False Returns ------- list of blocks """ if mask is None: mask = np.broadcast_to(True, shape=self.shape) new_values = self.values def make_a_block(nv, ref_loc): if isinstance(nv, list): assert len(nv) == 1, nv assert isinstance(nv[0], Block) block = nv[0] else: # Put back the dimension that was taken from it and make # a block out of the result. 
nv = _block_shape(nv, ndim=self.ndim) block = self.make_block(values=nv, placement=ref_loc) return block # ndim == 1 if self.ndim == 1: if mask.any(): nv = f(mask, new_values, None) else: nv = new_values if inplace else new_values.copy() block = make_a_block(nv, self.mgr_locs) return [block] # ndim > 1 new_blocks = [] for i, ref_loc in enumerate(self.mgr_locs): m = mask[i] v = new_values[i] # need a new block if m.any() or m.size == 0: # Apply our function; we may ignore_failures if this is a # reduction that is dropping nuisance columns GH#37827 try: nv = f(m, v, i) except TypeError: if ignore_failures: continue else: raise else: nv = v if inplace else v.copy() block = make_a_block(nv, [ref_loc]) new_blocks.append(block) return new_blocks def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: # no need to downcast our float # unless indicated if downcast is None and (self.is_float or self.is_datelike): return blocks return extend_blocks([b.downcast(downcast) for b in blocks]) def downcast(self, dtypes=None) -> List[Block]: """ try to downcast each item to the dict of dtypes if present """ # turn it off completely if dtypes is False: return [self] values = self.values if self.ndim == 1: # try to cast all non-floats here if dtypes is None: dtypes = "infer" nv = maybe_downcast_to_dtype(values, dtypes) return [self.make_block(nv)] # ndim > 1 if dtypes is None: return [self] if not (dtypes == "infer" or isinstance(dtypes, dict)): raise ValueError( "downcast must have a dictionary or 'infer' as its argument" ) elif dtypes != "infer": raise AssertionError("dtypes as dict is not supported yet") # operate column-by-column # this is expensive as it splits the blocks items-by-item def f(mask, val, idx): val = maybe_downcast_to_dtype(val, dtype="infer") return val return self.split_and_operate(None, f, False) def astype(self, dtype, copy: bool = False, errors: str = "raise"): """ Coerce to the new dtype. Parameters ---------- dtype : str, dtype convertible copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object Returns ------- Block """ errors_legal_values = ("raise", "ignore") if errors not in errors_legal_values: invalid_arg = ( "Expected value of kwarg 'errors' to be one of " f"{list(errors_legal_values)}. Supplied value is '{errors}'" ) raise ValueError(invalid_arg) if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): msg = ( f"Expected an instance of {dtype.__name__}, " "but got the class instead. Try instantiating 'dtype'." ) raise TypeError(msg) dtype = pandas_dtype(dtype) try: new_values = self._astype(dtype, copy=copy) except (ValueError, TypeError): # e.g. 
astype_nansafe can fail on object-dtype of strings # trying to convert to float if errors == "ignore": new_values = self.values else: raise newb = self.make_block(new_values) if newb.is_numeric and self.is_numeric: if newb.shape != self.shape: raise TypeError( f"cannot set astype for copy = [{copy}] for dtype " f"({self.dtype.name} [{self.shape}]) to different shape " f"({newb.dtype.name} [{newb.shape}])" ) return newb def _astype(self, dtype: DtypeObj, copy: bool) -> ArrayLike: values = self.values if is_datetime64tz_dtype(dtype) and is_datetime64_dtype(values.dtype): return astype_dt64_to_dt64tz(values, dtype, copy, via_utc=True) if is_dtype_equal(values.dtype, dtype): if copy: return values.copy() return values if isinstance(values, ExtensionArray): values = values.astype(dtype, copy=copy) else: values = astype_nansafe(values, dtype, copy=copy) return values def convert( self, copy: bool = True, datetime: bool = True, numeric: bool = True, timedelta: bool = True, ) -> List[Block]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ return [self.copy()] if copy else [self] def _can_hold_element(self, element: Any) -> bool: """ require the same dtype as ourselves """ raise NotImplementedError("Implemented on subclasses") def should_store(self, value: ArrayLike) -> bool: """ Should we set self.values[indexer] = value inplace or do we need to cast? Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool """ return is_dtype_equal(value.dtype, self.dtype) def to_native_types(self, na_rep="nan", quoting=None, **kwargs): """ convert to our native types format """ values = self.values mask = isna(values) itemsize = writers.word_len(na_rep) if not self.is_object and not quoting and itemsize: values = values.astype(str) if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize: # enlarge for the na_rep values = values.astype(f"<U{itemsize}") else: values = np.array(values, dtype="object") values[mask] = na_rep return self.make_block(values) # block actions # def copy(self, deep: bool = True): """ copy constructor """ values = self.values if deep: values = values.copy() return self.make_block_same_class(values, ndim=self.ndim) def replace( self, to_replace, value, inplace: bool = False, regex: bool = False, ) -> List[Block]: """ replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, "inplace") original_to_replace = to_replace if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing it is a no-op. # Note: If to_replace were a list, NDFrame.replace would call # replace_list instead of replace. return [self] if inplace else [self.copy()] values = self.values mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. 
return [self] if inplace else [self.copy()] if not self._can_hold_element(value): blk = self.astype(object) return blk.replace( to_replace=original_to_replace, value=value, inplace=True, regex=regex, ) blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) blocks = blk.convert(numeric=False, copy=not inplace) return blocks def _replace_regex( self, to_replace, value, inplace: bool = False, convert: bool = True, mask=None, ) -> List[Block]: """ Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. convert : bool, default True If true, try to coerce any object types to better types. mask : array-like of bool, optional True indicate corresponding element is ignored. Returns ------- List[Block] """ if not self._can_hold_element(to_replace): # i.e. only ObjectBlock, but could in principle include a # String ExtensionBlock return [self] if inplace else [self.copy()] rx = re.compile(to_replace) new_values = self.values if inplace else self.values.copy() replace_regex(new_values, rx, value, mask) block = self.make_block(new_values) if convert: nbs = block.convert(numeric=False) else: nbs = [block] return nbs def _replace_list( self, src_list: List[Any], dest_list: List[Any], inplace: bool = False, regex: bool = False, ) -> List[Block]: """ See BlockManager._replace_list docstring. """ # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) ] if not len(pairs): # shortcut, nothing to replace return [self] if inplace else [self.copy()] src_len = len(pairs) - 1 def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray: """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): return ~mask return compare_or_regex_search(self.values, s, regex, mask) if self.is_object: # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations mask = ~isna(self.values) masks = [comp(s[0], mask, regex) for s in pairs] else: # GH#38086 faster if we know we dont need to check for regex masks = [missing.mask_missing(self.values, s[0]) for s in pairs] masks = [_extract_bool_array(x) for x in masks] rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(pairs): new_rb: List["Block"] = [] for blk in rb: m = masks[i] convert = i == src_len # only convert once at the end result = blk._replace_coerce( to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex, ) if convert and blk.is_object: result = extend_blocks( [b.convert(numeric=False, copy=True) for b in result] ) new_rb.extend(result) rb = new_rb return rb def setitem(self, indexer, value): """ Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. 
""" transpose = self.ndim == 2 if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: raise ValueError(f"Cannot set values with ndim > {self.ndim}") # coerce None values, if appropriate if value is None: if self.is_numeric: value = np.nan # coerce if block dtype can store value values = self.values if not self._can_hold_element(value): # current dtype cannot store value, coerce to common dtype # TODO: can we just use coerce_to_target_dtype for all this if hasattr(value, "dtype"): dtype = value.dtype elif lib.is_scalar(value) and not isna(value): dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) else: # e.g. we are bool dtype and value is nan # TODO: watch out for case with listlike value and scalar/empty indexer dtype, _ = maybe_promote(np.array(value).dtype) return self.astype(dtype).setitem(indexer, value) dtype = find_common_type([values.dtype, dtype]) assert not is_dtype_equal(self.dtype, dtype) # otherwise should have _can_hold_element return self.astype(dtype).setitem(indexer, value) if self.dtype.kind in ["m", "M"]: arr = self.array_values().T arr[indexer] = value return self # value must be storable at this moment if is_extension_array_dtype(getattr(value, "dtype", None)): # We need to be careful not to allow through strings that # can be parsed to EADtypes is_ea_value = True arr_value = value else: is_ea_value = False arr_value = np.array(value) if transpose: values = values.T # length checking check_setitem_lengths(indexer, value, values) exact_match = is_exact_shape_match(values, arr_value) if is_empty_indexer(indexer, arr_value): # GH#8669 empty indexers pass elif is_scalar_indexer(indexer, self.ndim): # setting a single element for each dim and with a rhs that could # be e.g. a list; see GH#6043 values[indexer] = value elif exact_match and is_categorical_dtype(arr_value.dtype): # GH25495 - If the current dtype is not categorical, # we need to create a new categorical block values[indexer] = value if values.ndim == 2: # TODO(EA2D): special case not needed with 2D EAs if values.shape[-1] != 1: # shouldn't get here (at least until 2D EAs) raise NotImplementedError values = values[:, 0] return self.make_block(Categorical(values, dtype=arr_value.dtype)) elif exact_match and is_ea_value: # GH#32395 if we're going to replace the values entirely, just # substitute in the new array return self.make_block(arr_value) # if we are an exact match (ex-broadcasting), # then use the resultant dtype elif exact_match: # We are setting _all_ of the array's values, so can cast to new dtype values[indexer] = value values = values.astype(arr_value.dtype, copy=False) elif is_ea_value: # GH#38952 if values.ndim == 1: values[indexer] = value else: # TODO(EA2D): special case not needed with 2D EA values[indexer] = value.to_numpy(values.dtype).reshape(-1, 1) # set else: values[indexer] = value if transpose: values = values.T block = self.make_block(values) return block def putmask(self, mask, new) -> List[Block]: """ putmask the data to the block; it is possible that we may create a new dtype of block Return the resulting block(s). Parameters ---------- mask : np.ndarray[bool], SparseArray[bool], or BooleanArray new : a ndarray/object Returns ------- List[Block] """ transpose = self.ndim == 2 mask = _extract_bool_array(mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) new_values = self.values # delay copy if possible. 
# if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: # FIXME: make sure we have compatible NA new = self.fill_value if self._can_hold_element(new): if self.dtype.kind in ["m", "M"]: arr = self.array_values() arr = cast("NDArrayBackedExtensionArray", arr) if transpose: arr = arr.T arr.putmask(mask, new) return [self] if transpose: new_values = new_values.T putmask_without_repeat(new_values, mask, new) return [self] elif not mask.any(): return [self] else: # may need to upcast if transpose: mask = mask.T if isinstance(new, np.ndarray): new = new.T # operate column-by-column def f(mask, val, idx): if idx is None: # ndim==1 case. n = new else: if isinstance(new, np.ndarray): n = np.squeeze(new[idx % new.shape[0]]) else: n = np.array(new) # type of the new block dtype, _ = maybe_promote(n.dtype) # we need to explicitly astype here to make a copy n = n.astype(dtype) nv = putmask_smart(val, mask, n) return nv new_blocks = self.split_and_operate(mask, f, True) return new_blocks def coerce_to_target_dtype(self, other): """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ # if we cannot then coerce to object dtype, _ = infer_dtype_from(other, pandas_dtype=True) new_dtype = find_common_type([self.dtype, dtype]) return self.astype(new_dtype, copy=False) def interpolate( self, method: str = "pad", axis: int = 0, index: Optional[Index] = None, inplace: bool = False, limit: Optional[int] = None, limit_direction: str = "forward", limit_area: Optional[str] = None, fill_value: Optional[Any] = None, coerce: bool = False, downcast: Optional[str] = None, **kwargs, ): inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # If there are no NAs, then interpolate is a no-op return self if inplace else self.copy() # a fill na type method try: m = missing.clean_fill_method(method) except ValueError: m = None if m is not None: if fill_value is not None: # similar to validate_fillna_kwargs raise ValueError("Cannot pass both fill_value and method") return self._interpolate_with_fill( method=m, axis=axis, inplace=inplace, limit=limit, limit_area=limit_area, downcast=downcast, ) # validate the interp method m = missing.clean_interp_method(method, **kwargs) assert index is not None # for mypy return self._interpolate( method=m, index=index, axis=axis, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, inplace=inplace, downcast=downcast, **kwargs, ) def _interpolate_with_fill( self, method: str = "pad", axis: int = 0, inplace: bool = False, limit: Optional[int] = None, limit_area: Optional[str] = None, downcast: Optional[str] = None, ) -> List[Block]: """ fillna but using the interpolate machinery """ inplace = validate_bool_kwarg(inplace, "inplace") assert self._can_hold_na # checked by caller values = self.values if inplace else self.values.copy() values = missing.interpolate_2d( values, method=method, axis=axis, limit=limit, limit_area=limit_area, ) blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast) def _interpolate( self, method: str, index: Index, fill_value: Optional[Any] = None, axis: int = 0, limit: Optional[int] = None, limit_direction: str = "forward", limit_area: Optional[str] = None, inplace: bool = False, downcast: Optional[str] = None, **kwargs, ) -> List[Block]: """ interpolate using 
scipy wrappers """ inplace = validate_bool_kwarg(inplace, "inplace") data = self.values if inplace else self.values.copy() # only deal with floats if not self.is_float: if self.dtype.kind not in ["i", "u"]: return [self] data = data.astype(np.float64) if fill_value is None: fill_value = self.fill_value if method in ("krogh", "piecewise_polynomial", "pchip"): if not index.is_monotonic: raise ValueError( f"{method} interpolation requires that the index be monotonic." ) # process 1-d slices in the axis direction def func(yvalues: np.ndarray) -> np.ndarray: # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to missing.interpolate_1d return missing.interpolate_1d( xvalues=index, yvalues=yvalues, method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, bounds_error=False, **kwargs, ) # interp each column independently interp_values = np.apply_along_axis(func, axis, data) blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast) def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_value=lib.no_default): """ Take values according to indexer and return them as a block.bb """ # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock # so need to preserve types # sparse is treated like an ndarray, but needs .get_values() shaping values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True new_values = algos.take_nd( values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) # Called from three places in managers, all of which satisfy # this assertion assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self.mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def diff(self, n: int, axis: int = 1) -> List[Block]: """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis, stacklevel=7) return [self.make_block(values=new_values)] def shift(self, periods: int, axis: int = 0, fill_value=None): """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also new_values, fill_value = maybe_upcast(self.values, fill_value) new_values = shift(new_values, periods, axis, fill_value) return [self.make_block(new_values)] def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. 
On error return original object axis : int, default 0 Returns ------- List[Block] """ import pandas.core.computation.expressions as expressions assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) assert errors in ["raise", "ignore"] transpose = self.ndim == 2 values = self.values orig_other = other if transpose: values = values.T cond = _extract_bool_array(cond) if cond.ravel("K").all(): result = values else: # see if we can operate on the entire block, or need item-by-item # or if we are a single block (ndim == 1) if ( (self.dtype.kind in ["b", "i", "u"]) and lib.is_float(other) and np.isnan(other) ): # GH#3733 special case to avoid object-dtype casting # and go through numexpr path instead. # In integer case, np.where will cast to floats pass elif not self._can_hold_element(other): # we cannot coerce, return a compat dtype # we are explicitly ignoring errors block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, errors=errors, axis=axis) return self._maybe_downcast(blocks, "infer") if not ( (self.dtype.kind in ["b", "i", "u"]) and lib.is_float(other) and np.isnan(other) ): # convert datetime to datetime64, timedelta to timedelta64 other = convert_scalar_for_putitemlike(other, values.dtype) # By the time we get here, we should have all Series/Index # args extracted to ndarray result = expressions.where(cond, values, other) if self._can_hold_na or self.ndim == 1: if transpose: result = result.T return [self.make_block(result)] # might need to separate out blocks axis = cond.ndim - 1 cond = cond.swapaxes(axis, 0) mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool) result_blocks: List[Block] = [] for m in [mask, ~mask]: if m.any(): result = cast(np.ndarray, result) # EABlock overrides where taken = result.take(m.nonzero()[0], axis=axis) r = maybe_downcast_numeric(taken, self.dtype) nb = self.make_block(r.T, placement=self.mgr_locs[m]) result_blocks.append(nb) return result_blocks def _unstack(self, unstacker, fill_value, new_placement): """ Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack Returns ------- blocks : list of Block New blocks of unstacked values. mask : array_like of bool The mask of columns of `blocks` we should keep. """ new_values, mask = unstacker.get_new_values( self.values.T, fill_value=fill_value ) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? 
new_values = new_values.T[mask] new_placement = new_placement[mask] blocks = [make_block(new_values, placement=new_placement)] return blocks, mask def quantile(self, qs, interpolation="linear", axis: int = 0): """ compute the quantiles of the Parameters ---------- qs: a scalar or list of the quantiles to be computed interpolation: type of interpolation, default 'linear' axis: axis to compute, default 0 Returns ------- Block """ # We should always have ndim == 2 because Series dispatches to DataFrame assert self.ndim == 2 values = self.get_values() is_empty = values.shape[axis] == 0 orig_scalar = not is_list_like(qs) if orig_scalar: # make list-like, unpack later qs = [qs] if is_empty: # create the array of na_values # 2d len(values) * len(qs) result = np.repeat( np.array([self.fill_value] * len(qs)), len(values) ).reshape(len(values), len(qs)) else: # asarray needed for Sparse, see GH#24600 mask = np.asarray(isna(values)) result = nanpercentile( values, np.array(qs) * 100, axis=axis, na_value=self.fill_value, mask=mask, ndim=values.ndim, interpolation=interpolation, ) result = np.array(result, copy=False) result = result.T if orig_scalar and not lib.is_scalar(result): # result could be scalar in case with is_empty and self.ndim == 1 assert result.shape[-1] == 1, result.shape result = result[..., 0] result = lib.item_from_zerodim(result) ndim = np.ndim(result) return make_block(result, placement=np.arange(len(result)), ndim=ndim) def _replace_coerce( self, to_replace, value, mask: np.ndarray, inplace: bool = True, regex: bool = False, ) -> List[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. Returns ------- List[Block] """ if mask.any(): if not regex: nb = self.coerce_to_target_dtype(value) if nb is self and not inplace: nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] else: regex = _should_use_regex(regex, to_replace) if regex: return self._replace_regex( to_replace, value, inplace=inplace, convert=False, mask=mask, ) return self.replace(to_replace, value, inplace=inplace, regex=False) return [self] class ExtensionBlock(Block): """ Block for holding extension types. Notes ----- This holds all 3rd-party extension array types. It's also the immediate parent class for our internal extension types' blocks, CategoricalBlock. ExtensionArrays are limited to 1-D. """ _can_consolidate = False _validate_ndim = False is_extension = True values: ExtensionArray def __init__(self, values, placement, ndim: int): """ Initialize a non-consolidatable block. 'ndim' may be inferred from 'placement'. This will call continue to call __init__ for the other base classes mixed in with this Mixin. 
""" # Placement must be converted to BlockPlacement so that we can check # its length if not isinstance(placement, libinternals.BlockPlacement): placement = libinternals.BlockPlacement(placement) # Maybe infer ndim from placement if ndim is None: if len(placement) != 1: ndim = 1 else: ndim = 2 super().__init__(values, placement, ndim=ndim) if self.ndim == 2 and len(self.mgr_locs) != 1: # TODO(EA2D): check unnecessary with 2D EAs raise AssertionError("block.size != values.size") @property def shape(self): # TODO(EA2D): override unnecessary with 2D EAs if self.ndim == 1: return (len(self.values),) return len(self.mgr_locs), len(self.values) def iget(self, col): if self.ndim == 2 and isinstance(col, tuple): # TODO(EA2D): unnecessary with 2D EAs col, loc = col if not com.is_null_slice(col) and col != 0: raise IndexError(f"{self} only contains one item") elif isinstance(col, slice): if col != slice(None): raise NotImplementedError(col) return self.values[[loc]] return self.values[loc] else: if col != 0: raise IndexError(f"{self} only contains one item") return self.values def set_inplace(self, locs, values): # NB: This is a misnomer, is supposed to be inplace but is not, # see GH#33457 assert locs.tolist() == [0] self.values = values def putmask(self, mask, new) -> List[Block]: """ See Block.putmask.__doc__ """ mask = _extract_bool_array(mask) new_values = self.values if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): new = new[mask] mask = safe_reshape(mask, new_values.shape) new_values[mask] = new return [self.make_block(values=new_values)] def _maybe_coerce_values(self, values): """ Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. Parameters ---------- values : Index, Series, ExtensionArray Returns ------- ExtensionArray """ return extract_array(values) @property def _holder(self): # For extension blocks, the holder is values-dependent. return type(self.values) @property def fill_value(self): # Used in reindex_indexer return self.values.dtype.na_value @property def _can_hold_na(self): # The default ExtensionArray._can_hold_na is True return self._holder._can_hold_na @property def is_view(self) -> bool: """Extension arrays are never treated as views.""" return False @property def is_numeric(self): return self.values.dtype._is_numeric def setitem(self, indexer, value): """ Attempt self.values[indexer] = value, possibly creating a new array. This differs from Block.setitem by not allowing setitem to change the dtype of the Block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ if not self._can_hold_element(value): # This is only relevant for DatetimeTZBlock, which has a # non-trivial `_can_hold_element`. # https://github.com/pandas-dev/pandas/issues/24020 # Need a dedicated setitem until GH#24020 (type promotion in setitem # for extension arrays) is designed and implemented. return self.astype(object).setitem(indexer, value) if isinstance(indexer, tuple): # TODO(EA2D): not needed with 2D EAs # we are always 1-D indexer = indexer[0] check_setitem_lengths(indexer, value, self.values) self.values[indexer] = value return self def get_values(self, dtype: Optional[Dtype] = None): # ExtensionArrays must be iterable, so this works. 
# TODO(EA2D): reshape not needed with 2D EAs return np.asarray(self.values).reshape(self.shape) def array_values(self) -> ExtensionArray: return self.values def to_native_types(self, na_rep="nan", quoting=None, **kwargs): """override to use ExtensionArray astype for the conversion""" values = self.values mask = isna(values) values = np.asarray(values.astype(object)) values[mask] = na_rep # TODO(EA2D): reshape not needed with 2D EAs # we are expected to return a 2-d ndarray return self.make_block(values) def take_nd( self, indexer, axis: int = 0, new_mgr_locs=None, fill_value=lib.no_default ): """ Take values according to indexer and return them as a block. """ if fill_value is lib.no_default: fill_value = None # TODO(EA2D): special case not needed with 2D EAs # axis doesn't matter; we are really a single-dim object # but are passed the axis depending on the calling routing # if its REALLY axis 0, then this will be a reindex and not a take new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True) # Called from three places in managers, all of which satisfy # this assertion assert not (self.ndim == 1 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self.mgr_locs return self.make_block_same_class(new_values, new_mgr_locs) def _can_hold_element(self, element: Any) -> bool: # TODO: We may need to think about pushing this onto the array. # We're doing the same as CategoricalBlock here. return True def _slice(self, slicer): """ Return a slice of my values. Parameters ---------- slicer : slice, ndarray[int], or a tuple of these Valid (non-reducing) indexer for self.values. Returns ------- np.ndarray or ExtensionArray """ # return same dims as we currently have if not isinstance(slicer, tuple) and self.ndim == 2: # reached via getitem_block via _slice_take_blocks_ax0 # TODO(EA2D): won't be necessary with 2D EAs slicer = (slicer, slice(None)) if isinstance(slicer, tuple) and len(slicer) == 2: first = slicer[0] if not isinstance(first, slice): raise AssertionError( "invalid slicing for a 1-ndim ExtensionArray", first ) # GH#32959 only full-slicers along fake-dim0 are valid # TODO(EA2D): won't be necessary with 2D EAs new_locs = self.mgr_locs[first] if len(new_locs): # effectively slice(None) slicer = slicer[1] else: raise AssertionError( "invalid slicing for a 1-ndim ExtensionArray", slicer ) return self.values[slicer] def fillna(self, value, limit=None, inplace=False, downcast=None): values = self.values if inplace else self.values.copy() values = values.fillna(value=value, limit=limit) return [ self.make_block_same_class( values=values, placement=self.mgr_locs, ndim=self.ndim ) ] def interpolate( self, method="pad", axis=0, inplace=False, limit=None, fill_value=None, **kwargs ): values = self.values if inplace else self.values.copy() return self.make_block_same_class( values=values.fillna(value=fill_value, method=method, limit=limit), placement=self.mgr_locs, ) def diff(self, n: int, axis: int = 1) -> List[Block]: if axis == 0 and n != 0: # n==0 case will be a no-op so let is fall through # Since we only have one column, the result will be all-NA. # Create this result by shifting along axis=0 past the length of # our values. return super().diff(len(self.values), axis=0) if axis == 1: # TODO(EA2D): unnecessary with 2D EAs # we are by definition 1D. axis = 0 return super().diff(n, axis) def shift( self, periods: int, axis: int = 0, fill_value: Any = None ) -> List[ExtensionBlock]: """ Shift the block by `periods`. 
Dispatches to underlying ExtensionArray and re-boxes in an ExtensionBlock. """ return [ self.make_block_same_class( self.values.shift(periods=periods, fill_value=fill_value), placement=self.mgr_locs, ndim=self.ndim, ) ] def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: cond = _extract_bool_array(cond) assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) if isinstance(other, np.ndarray) and other.ndim == 2: # TODO(EA2D): unnecessary with 2D EAs assert other.shape[1] == 1 other = other[:, 0] if isinstance(cond, np.ndarray) and cond.ndim == 2: # TODO(EA2D): unnecessary with 2D EAs assert cond.shape[1] == 1 cond = cond[:, 0] if lib.is_scalar(other) and isna(other): # The default `other` for Series / Frame is np.nan # we want to replace that with the correct NA value # for the type other = self.dtype.na_value if is_sparse(self.values): # TODO(SparseArray.__setitem__): remove this if condition # We need to re-infer the type of the data after doing the # where, for cases where the subtypes don't match dtype = None else: dtype = self.dtype result = self.values.copy() icond = ~cond if lib.is_scalar(other): set_other = other else: set_other = other[icond] try: result[icond] = set_other except (NotImplementedError, TypeError): # NotImplementedError for class not implementing `__setitem__` # TypeError for SparseArray, which implements just to raise # a TypeError result = self._holder._from_sequence( np.where(cond, self.values, other), dtype=dtype ) return [self.make_block_same_class(result, placement=self.mgr_locs)] def _unstack(self, unstacker, fill_value, new_placement): # ExtensionArray-safe unstack. # We override ObjectBlock._unstack, which unstacks directly on the # values of the array. For EA-backed blocks, this would require # converting to a 2-D ndarray of objects. # Instead, we unstack an ndarray of integer positions, followed by # a `take` on the actual values. n_rows = self.shape[-1] dummy_arr = np.arange(n_rows) new_values, mask = unstacker.get_new_values(dummy_arr, fill_value=-1) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? blocks = [ self.make_block_same_class( self.values.take(indices, allow_fill=True, fill_value=fill_value), [place], ) for indices, place in zip(new_values.T, new_placement) ] return blocks, mask class HybridMixin: """ Mixin for Blocks backed (maybe indirectly) by ExtensionArrays. """ array_values: Callable def _can_hold_element(self, element: Any) -> bool: values = self.array_values() try: values._validate_setitem_value(element) return True except (ValueError, TypeError): return False class ObjectValuesExtensionBlock(HybridMixin, ExtensionBlock): """ Block providing backwards-compatibility for `.values`. Used by PeriodArray and IntervalArray to ensure that Series[T].values is an ndarray of objects. 
""" def external_values(self): return self.values.astype(object) class NumericBlock(Block): __slots__ = () is_numeric = True def _can_hold_element(self, element: Any) -> bool: return can_hold_element(self.dtype, element) @property def _can_hold_na(self): return self.dtype.kind not in ["b", "i", "u"] @property def is_bool(self): return self.dtype.kind == "b" class FloatBlock(NumericBlock): __slots__ = () is_float = True def to_native_types( self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs ): """ convert to our native types format """ values = self.values # see gh-13418: no special formatting is desired at the # output (important for appropriate 'quoting' behaviour), # so do not pass it through the FloatArrayFormatter if float_format is None and decimal == ".": mask = isna(values) if not quoting: values = values.astype(str) else: values = np.array(values, dtype="object") values[mask] = na_rep return self.make_block(values) from pandas.io.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter( values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) res = formatter.get_result_as_array() return self.make_block(res) class DatetimeLikeBlockMixin(HybridMixin, Block): """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.""" @property def _holder(self): return DatetimeArray @property def fill_value(self): return np.datetime64("NaT", "ns") def get_values(self, dtype: Optional[Dtype] = None): """ return object dtype as boxed values, such as Timestamps/Timedelta """ if is_object_dtype(dtype): # DTA/TDA constructor and astype can handle 2D return self._holder(self.values).astype(object) return self.values def internal_values(self): # Override to return DatetimeArray and TimedeltaArray return self.array_values() def array_values(self): return self._holder._simple_new(self.values) def iget(self, key): # GH#31649 we need to wrap scalars in Timestamp/Timedelta # TODO(EA2D): this can be removed if we ever have 2D EA return self.array_values().reshape(self.shape)[key] def diff(self, n: int, axis: int = 0) -> List[Block]: """ 1st discrete difference. Parameters ---------- n : int Number of periods to diff. axis : int, default 0 Axis to diff upon. Returns ------- A list with a new TimeDeltaBlock. Notes ----- The arguments here are mimicking shift so they are called correctly by apply. 
""" # TODO(EA2D): reshape not necessary with 2D EAs values = self.array_values().reshape(self.shape) new_values = values - values.shift(n, axis=axis) return [ TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer, ndim=self.ndim) ] def shift(self, periods, axis=0, fill_value=None): # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs values = self.array_values() new_values = values.shift(periods, fill_value=fill_value, axis=axis) return self.make_block_same_class(new_values) def to_native_types(self, na_rep="NaT", **kwargs): """ convert to our native types format """ arr = self.array_values() result = arr._format_native_types(na_rep=na_rep, **kwargs) return self.make_block(result) def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: # TODO(EA2D): reshape unnecessary with 2D EAs arr = self.array_values().reshape(self.shape) cond = _extract_bool_array(cond) try: res_values = arr.T.where(cond, other).T except (ValueError, TypeError): return super().where(other, cond, errors=errors, axis=axis) # TODO(EA2D): reshape not needed with 2D EAs res_values = res_values.reshape(self.values.shape) nb = self.make_block_same_class(res_values) return [nb] class DatetimeBlock(DatetimeLikeBlockMixin): __slots__ = () is_datetime = True @property def _can_hold_na(self): return True def _maybe_coerce_values(self, values): """ Input validation for values passed to __init__. Ensure that we have datetime64ns, coercing if necessary. Parameters ---------- values : array-like Must be convertible to datetime64 Returns ------- values : ndarray[datetime64ns] Overridden by DatetimeTZBlock. """ if values.dtype != DT64NS_DTYPE: values = conversion.ensure_datetime64ns(values) if isinstance(values, DatetimeArray): values = values._data assert isinstance(values, np.ndarray), type(values) return values def set_inplace(self, locs, values): """ See Block.set.__doc__ """ values = conversion.ensure_datetime64ns(values, copy=False) self.values[locs] = values class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ values: DatetimeArray __slots__ = () is_datetimetz = True is_extension = True internal_values = Block.internal_values _can_hold_element = DatetimeBlock._can_hold_element to_native_types = DatetimeBlock.to_native_types diff = DatetimeBlock.diff fill_value = np.datetime64("NaT", "ns") where = DatetimeBlock.where array_values = ExtensionBlock.array_values @property def _holder(self): return DatetimeArray def _maybe_coerce_values(self, values): """ Input validation for values passed to __init__. Ensure that we have datetime64TZ, coercing if necessary. Parameters ---------- values : array-like Must be convertible to datetime64 Returns ------- values : DatetimeArray """ if not isinstance(values, self._holder): values = self._holder(values) if values.tz is None: raise ValueError("cannot create a DatetimeTZBlock without a tz") return values @property def is_view(self) -> bool: """ return a boolean if I am possibly a view """ # check the ndarray values of the DatetimeIndex values return self.values._data.base is not None def get_values(self, dtype: Optional[Dtype] = None): """ Returns an ndarray of values. Parameters ---------- dtype : np.dtype Only `object`-like dtypes are respected here (not sure why). Returns ------- values : ndarray When ``dtype=object``, then and object-dtype ndarray of boxed values is returned. Otherwise, an M8[ns] ndarray is returned. DatetimeArray is always 1-d. 
``get_values`` will reshape the return value to be the same dimensionality as the block. """ values = self.values if is_object_dtype(dtype): values = values.astype(object) # TODO(EA2D): reshape unnecessary with 2D EAs # Ensure that our shape is correct for DataFrame. # ExtensionArrays are always 1-D, even in a DataFrame when # the analogous NumPy-backed column would be a 2-D ndarray. return np.asarray(values).reshape(self.shape) def external_values(self): # NB: this is different from np.asarray(self.values), since that # return an object-dtype ndarray of Timestamps. if self.is_datetimetz: # avoid FutureWarning in .astype in casting from dt64t to dt64 return self.values._data return np.asarray(self.values.astype("datetime64[ns]", copy=False)) def fillna(self, value, limit=None, inplace=False, downcast=None): # We support filling a DatetimeTZ with a `value` whose timezone # is different by coercing to object. if self._can_hold_element(value): return super().fillna(value, limit, inplace, downcast) # different timezones, or a non-tz return self.astype(object).fillna( value, limit=limit, inplace=inplace, downcast=downcast ) def quantile(self, qs, interpolation="linear", axis=0): naive = self.values.view("M8[ns]") # TODO(EA2D): kludge for 2D block with 1D values naive = naive.reshape(self.shape) blk = self.make_block(naive) res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis) # TODO(EA2D): ravel is kludge for 2D block with 1D values, assumes column-like aware = self._holder(res_blk.values.ravel(), dtype=self.dtype) return self.make_block_same_class(aware, ndim=res_blk.ndim) def _check_ndim(self, values, ndim): """ ndim inference and validation. This is overridden by the DatetimeTZBlock to check the case of 2D data (values.ndim == 2), which should only be allowed if ndim is also 2. The case of 1D array is still allowed with both ndim of 1 or 2, as if the case for other EAs. Therefore, we are only checking `values.ndim > ndim` instead of `values.ndim != ndim` as for consolidated blocks. """ if ndim is None: ndim = values.ndim if values.ndim > ndim: raise ValueError( "Wrong number of dimensions. " f"values.ndim != ndim [{values.ndim} != {ndim}]" ) return ndim class TimeDeltaBlock(DatetimeLikeBlockMixin): __slots__ = () is_timedelta = True _can_hold_na = True is_numeric = False fill_value = np.timedelta64("NaT", "ns") def _maybe_coerce_values(self, values): if values.dtype != TD64NS_DTYPE: # non-nano we will convert to nano if values.dtype.kind != "m": # caller is responsible for ensuring timedelta64 dtype raise TypeError(values.dtype) # pragma: no cover values = TimedeltaArray._from_sequence(values)._data if isinstance(values, TimedeltaArray): values = values._data assert isinstance(values, np.ndarray), type(values) return values @property def _holder(self): return TimedeltaArray def fillna(self, value, **kwargs): # TODO(EA2D): if we operated on array_values, TDA.fillna would handle # raising here. if is_integer(value): # Deprecation GH#24694, GH#19233 raise TypeError( "Passing integers to fillna for timedelta64[ns] dtype is no " "longer supported. To obtain the old behavior, pass " "`pd.Timedelta(seconds=n)` instead." 
) return super().fillna(value, **kwargs) class ObjectBlock(Block): __slots__ = () is_object = True _can_hold_na = True def _maybe_coerce_values(self, values): if issubclass(values.dtype.type, (str, bytes)): values = np.array(values, dtype=object) return values @property def is_bool(self): """ we can be a bool if we have only bool values but are of type object """ return lib.is_bool_array(self.values.ravel("K")) def reduce(self, func, ignore_failures: bool = False) -> List[Block]: """ For object-dtype, we operate column-wise. """ assert self.ndim == 2 values = self.values if len(values) > 1: # split_and_operate expects func with signature (mask, values, inplace) def mask_func(mask, values, inplace): if values.ndim == 1: values = values.reshape(1, -1) return func(values) return self.split_and_operate( None, mask_func, False, ignore_failures=ignore_failures ) try: res = func(values) except TypeError: if not ignore_failures: raise return [] assert isinstance(res, np.ndarray) assert res.ndim == 1 res = res.reshape(1, -1) return [self.make_block_same_class(res)] def convert( self, copy: bool = True, datetime: bool = True, numeric: bool = True, timedelta: bool = True, ) -> List[Block]: """ attempt to cast any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! """ # operate column-by-column def f(mask, val, idx): shape = val.shape values = soft_convert_objects( val.ravel(), datetime=datetime, numeric=numeric, timedelta=timedelta, copy=copy, ) if isinstance(values, np.ndarray): # TODO(EA2D): allow EA once reshape is supported values = values.reshape(shape) return values if self.ndim == 2: blocks = self.split_and_operate(None, f, False) else: values = f(None, self.values.ravel(), None) blocks = [self.make_block(values)] return blocks def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: if downcast is not None: return blocks # split and convert the blocks return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) def _can_hold_element(self, element: Any) -> bool: return True def replace( self, to_replace, value, inplace: bool = False, regex: bool = False, ) -> List[Block]: # Note: the checks we do in NDFrame.replace ensure we never get # here with listlike to_replace or value, as those cases # go through _replace_list regex = _should_use_regex(regex, to_replace) if regex: return self._replace_regex(to_replace, value, inplace=inplace) else: return super().replace(to_replace, value, inplace=inplace, regex=False) def _should_use_regex(regex: bool, to_replace: Any) -> bool: """ Decide whether to treat `to_replace` as a regular expression. """ if is_re(to_replace): regex = True regex = regex and is_re_compilable(to_replace) # Don't use regex if the pattern is empty. 
regex = regex and re.compile(to_replace).pattern != "" return regex class CategoricalBlock(ExtensionBlock): __slots__ = () def _replace_list( self, src_list: List[Any], dest_list: List[Any], inplace: bool = False, regex: bool = False, ) -> List[Block]: if len(algos.unique(dest_list)) == 1: # We likely got here by tiling value inside NDFrame.replace, # so un-tile here return self.replace(src_list, dest_list[0], inplace, regex) return super()._replace_list(src_list, dest_list, inplace, regex) def replace( self, to_replace, value, inplace: bool = False, regex: bool = False, ) -> List[Block]: inplace = validate_bool_kwarg(inplace, "inplace") result = self if inplace else self.copy() result.values.replace(to_replace, value, inplace=True) return [result] # ----------------------------------------------------------------- # Constructor Helpers def get_block_type(values, dtype: Optional[Dtype] = None): """ Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- values : ndarray-like dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block """ # We use vtype and kind checks because they are much more performant # than is_foo_dtype dtype = cast(np.dtype, pandas_dtype(dtype) if dtype else values.dtype) vtype = dtype.type kind = dtype.kind cls: Type[Block] if is_sparse(dtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif isinstance(dtype, CategoricalDtype): cls = CategoricalBlock elif vtype is Timestamp: cls = DatetimeTZBlock elif vtype is Interval or vtype is Period: cls = ObjectValuesExtensionBlock elif isinstance(dtype, ExtensionDtype): # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock elif kind == "M": cls = DatetimeBlock elif kind == "m": cls = TimeDeltaBlock elif kind == "f": cls = FloatBlock elif kind in ["c", "i", "u", "b"]: cls = NumericBlock else: cls = ObjectBlock return cls def make_block(values, placement, klass=None, ndim=None, dtype: Optional[Dtype] = None): # Ensure that we don't allow PandasArray / PandasDtype in internals. # For now, blocks should be backed by ndarrays when possible. if isinstance(values, ABCPandasArray): values = values.to_numpy() if ndim and ndim > 1: # TODO(EA2D): special case not needed with 2D EAs values = np.atleast_2d(values) if isinstance(dtype, PandasDtype): dtype = dtype.numpy_dtype if klass is None: dtype = dtype or values.dtype klass = get_block_type(values, dtype) elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype): # TODO: This is no longer hit internally; does it need to be retained # for e.g. pyarrow? values = DatetimeArray._simple_new(values, dtype=dtype) return klass(values, ndim=ndim, placement=placement) # ----------------------------------------------------------------- def extend_blocks(result, blocks=None): """ return a new extended blocks, given the result """ if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) else: assert isinstance(result, Block), type(result) blocks.append(result) return blocks def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: """ guarantee the shape of the values to be at least 1 d """ if values.ndim < ndim: shape = values.shape if not is_extension_array_dtype(values.dtype): # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. 
# error: "ExtensionArray" has no attribute "reshape" values = values.reshape(tuple((1,) + shape)) # type: ignore[attr-defined] return values def safe_reshape(arr: ArrayLike, new_shape: Shape) -> ArrayLike: """ Reshape `arr` to have shape `new_shape`, unless it is an ExtensionArray, in which case it will be returned unchanged (see gh-13012). Parameters ---------- arr : np.ndarray or ExtensionArray new_shape : Tuple[int] Returns ------- np.ndarray or ExtensionArray """ if not is_extension_array_dtype(arr.dtype): # Note: this will include TimedeltaArray and tz-naive DatetimeArray # TODO(EA2D): special case will be unnecessary with 2D EAs arr = np.asarray(arr).reshape(new_shape) return arr def _extract_bool_array(mask: ArrayLike) -> np.ndarray: """ If we have a SparseArray or BooleanArray, convert it to ndarray[bool]. """ if isinstance(mask, ExtensionArray): # We could have BooleanArray, Sparse[bool], ... # Except for BooleanArray, this is equivalent to just # np.asarray(mask, dtype=bool) mask = mask.to_numpy(dtype=bool, na_value=False) assert isinstance(mask, np.ndarray), type(mask) assert mask.dtype == bool, mask.dtype return mask
the-stack_0_8204
"""Switch platform for Advantage Air integration.""" from homeassistant.helpers.entity import ToggleEntity from .const import ( ADVANTAGE_AIR_STATE_OFF, ADVANTAGE_AIR_STATE_ON, DOMAIN as ADVANTAGE_AIR_DOMAIN, ) from .entity import AdvantageAirEntity async def async_setup_entry(hass, config_entry, async_add_entities): """Set up AdvantageAir toggle platform.""" instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id] entities = [] for ac_key, ac_device in instance["coordinator"].data["aircons"].items(): if ac_device["info"]["freshAirStatus"] != "none": entities.append(AdvantageAirFreshAir(instance, ac_key)) async_add_entities(entities) class AdvantageAirFreshAir(AdvantageAirEntity, ToggleEntity): """Representation of Advantage Air fresh air control.""" _attr_icon = "mdi:air-filter" def __init__(self, instance, ac_key): """Initialize an Advantage Air fresh air control.""" super().__init__(instance, ac_key) self._attr_name = f'{self._ac["name"]} Fresh Air' self._attr_unique_id = ( f'{self.coordinator.data["system"]["rid"]}-{ac_key}-freshair' ) @property def is_on(self): """Return the fresh air status.""" return self._ac["freshAirStatus"] == ADVANTAGE_AIR_STATE_ON async def async_turn_on(self, **kwargs): """Turn fresh air on.""" await self.async_change( {self.ac_key: {"info": {"freshAirStatus": ADVANTAGE_AIR_STATE_ON}}} ) async def async_turn_off(self, **kwargs): """Turn fresh air off.""" await self.async_change( {self.ac_key: {"info": {"freshAirStatus": ADVANTAGE_AIR_STATE_OFF}}} )
the-stack_0_8206
#coding=utf8
import os

import itchat
from NetEaseMusicApi import interact_select_song

# Help text shown to the user (Chinese). Rough translation:
#   "Welcome to WeChat NetEase Cloud Music
#    帮助 (help):  show this help
#    关闭 (close): stop the current song
#    song name:    follow the prompts to play music"
HELP_MSG = u'''\
欢迎使用微信网易云音乐
帮助: 显示帮助
关闭: 关闭歌曲
歌名: 按照引导播放音乐\
'''

# Create an empty placeholder file; "playing" it replaces whatever song is on.
with open('stop.mp3', 'w') as f:
    pass

def close_music():
    # os.startfile is Windows-only; a cross-platform variant is sketched after
    # this script.
    os.startfile('stop.mp3')

@itchat.msg_register(itchat.content.TEXT)
def music_player(msg):
    # Only react to messages the user sends to their own "file helper" chat.
    if msg['ToUserName'] != 'filehelper':
        return
    if msg['Text'] == u'关闭':  # "close": stop playback
        close_music()
        itchat.send(u'音乐已关闭', 'filehelper')  # "music stopped"
    elif msg['Text'] == u'帮助':  # "help": show the help text
        itchat.send(HELP_MSG, 'filehelper')
    else:  # anything else is treated as a song name to search and play
        itchat.send(interact_select_song(msg['Text']), 'filehelper')

itchat.auto_login(True, enableCmdQR=True)  # log in via a QR code rendered in the terminal
itchat.send(HELP_MSG, 'filehelper')
itchat.run()
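# os.startfile exists only on Windows, so close_music above raises AttributeError on
# macOS or Linux. A hedged sketch of the same empty-file trick made cross-platform by
# swapping in subprocess with each platform's default opener; this is an alternative,
# not part of the original script.
import os
import subprocess
import sys

def close_music_cross_platform(path='stop.mp3'):
    # "Play" the empty file so the default player replaces the current song.
    if sys.platform.startswith('win'):
        os.startfile(path)                    # Windows only
    elif sys.platform == 'darwin':
        subprocess.Popen(['open', path])      # macOS default opener
    else:
        subprocess.Popen(['xdg-open', path])  # most Linux desktops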
the-stack_0_8209
import pytest from hypothesis import given, settings, HealthCheck from hypothesis import reproduce_failure # pylint: disable=unused-import from itertools import product import numpy as np from tests.hypothesis_helper import dfs_min2, dfs_no_min from os import environ if environ.get("TRAVIS"): max_examples = 10 deadline = None else: max_examples = 100 deadline = None strandedness = [False, "same", "opposite"] binary_methods = [ "set_union", "set_intersect", "overlap", "nearest", "intersect", "subtract", "join" ] unary_methods = [ "merge", "sort", "cluster", "pc", "mpc", "spc", "drop_duplicate_positions", "drop" ] method_chain = product(binary_methods, binary_methods) # cannot start with an operation that makes pyrange unstranded and then try a stranded op strandedness_chain = list(product(["same", "opposite"], strandedness)) + list( product(strandedness, [None])) @pytest.mark.bedtools @pytest.mark.parametrize("strandedness_chain,method_chain", product(strandedness_chain, method_chain)) @settings( max_examples=max_examples, deadline=deadline, print_blob=True, suppress_health_check=HealthCheck.all()) @given(gr=dfs_no_min(), gr2=dfs_no_min(), gr3=dfs_no_min()) # pylint: disable=no-value-for-parameter # @reproduce_failure('5.5.4', b'AXicY2RAA4xIJCoLygcAALIABg==') # test_three_in_a_row[strandedness_chain122-method_chain122] # @reproduce_failure('5.5.4', b'AXicY2QAAUYGKGBkxM9nAAABEAAJ') # test_three_in_a_row[strandedness_chain45-method_chain45] # @reproduce_failure('5.5.4', b'AXicY2RAA4xIJDY+AAC2AAY=') # test_three_in_a_row[strandedness_chain24-method_chain24] def test_three_in_a_row(gr, gr2, gr3, strandedness_chain, method_chain): s1, s2 = strandedness_chain f1, f2 = method_chain suffix_methods = ["nearest", "join"] if f1 in suffix_methods and f2 in suffix_methods: m1 = getattr(gr, f1) gr2 = m1(gr2, strandedness=s1) if len(gr2) > 0: assert gr2.Start.dtype == np.int64 assert (gr2.Start >= 0).all() and (gr2.End >= 0).all() m2 = getattr(gr2, f2) gr3 = m2(gr3, strandedness=s2, suffix="_c") print(gr3) if len(gr3) > 0: assert gr3.Start.dtype == np.int64 assert (gr3.Start >= 0).all() and (gr3.End >= 0).all() else: m1 = getattr(gr, f1) gr2 = m1(gr2, strandedness=s1) if len(gr2) > 0: assert gr2.Start.dtype == np.int64 assert (gr2.Start >= 0).all() and (gr2.End >= 0).all() m2 = getattr(gr2, f2) gr3 = m2(gr3, strandedness=s2) print(gr3) if len(gr3) > 0: assert gr3.Start.dtype == np.int64 assert (gr3.Start >= 0).all() and (gr3.End >= 0).all() # @pytest.mark.bedtools # @pytest.mark.parametrize("strandedness_chain,method_chain", # product(strandedness_chain, method_chain)) # @settings( # max_examples=max_examples, # deadline=deadline, # suppress_health_check=HealthCheck.all()) # @given(gr=dfs_no_min(), gr2=dfs_no_min(), gr3=dfs_no_min()) # pylint: disable=no-value-for-parameter # def test_three_in_a_row(gr, gr2, gr3, strandedness_chain, method_chain): # s1, s2 = strandedness_chain # f1, f2 = method_chain # # print(s1, s2) # # print(f1, f2) # m1 = getattr(gr, f1) # gr2 = m1(gr2, strandedness=s1) # m2 = getattr(gr2, f2) # gr3 = m2(gr3, strandedness=s2)
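# A concrete miniature of the chaining the property-based test above exercises,
# assuming pyranges is installed; the chromosome names and coordinates are invented,
# while the method and parameter names (join/nearest, strandedness, suffix) are the
# ones the test itself calls.
import pandas as pd
import pyranges as pr

df_a = pd.DataFrame(
    {
        "Chromosome": ["chr1", "chr1"],
        "Start": [5, 40],
        "End": [15, 60],
        "Strand": ["+", "-"],
    }
)
df_b = pd.DataFrame(
    {"Chromosome": ["chr1"], "Start": [10], "End": [20], "Strand": ["+"]}
)

gr_a, gr_b = pr.PyRanges(df_a), pr.PyRanges(df_b)

# One (f1, f2) pair from the "suffix methods" branch: join, then nearest with an
# explicit suffix for the second call, mirroring m1/m2 in the test.
step1 = gr_a.join(gr_b, strandedness="same")
step2 = step1.nearest(gr_b, strandedness=None, suffix="_c")
print(step2)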
the-stack_0_8210
import os import shlex import subprocess import h5py import numpy as np import torch import torch.utils.data as data BASE_DIR = os.path.dirname(os.path.abspath(__file__)) def _get_data_files(list_filename): with open(list_filename) as f: return [line.rstrip() for line in f] def _load_data_file(name): f = h5py.File(name, "r") data = f["data"][:] label = f["label"][:] return data, label class Indoor3DSemSeg(data.Dataset): def __init__(self, num_points, train=True, download=True, data_precent=1.0): super().__init__() self.data_precent = data_precent self.folder = "indoor3d_sem_seg_hdf5_data" self.data_dir = os.path.join(BASE_DIR, self.folder) self.url = ( "https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip" ) if download and not os.path.exists(self.data_dir): zipfile = os.path.join(BASE_DIR, os.path.basename(self.url)) subprocess.check_call( shlex.split("curl {} -o {}".format(self.url, zipfile)) ) subprocess.check_call( shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR)) ) subprocess.check_call(shlex.split("rm {}".format(zipfile))) self.train, self.num_points = train, num_points all_files = _get_data_files(os.path.join(self.data_dir, "all_files.txt")) room_filelist = _get_data_files( os.path.join(self.data_dir, "room_filelist.txt") ) data_batchlist, label_batchlist = [], [] for f in all_files: data, label = _load_data_file(os.path.join(BASE_DIR, f)) data_batchlist.append(data) label_batchlist.append(label) data_batches = np.concatenate(data_batchlist, 0) labels_batches = np.concatenate(label_batchlist, 0) test_area = "Area_5" train_idxs, test_idxs = [], [] for i, room_name in enumerate(room_filelist): if test_area in room_name: test_idxs.append(i) else: train_idxs.append(i) if self.train: self.points = data_batches[train_idxs, ...] self.labels = labels_batches[train_idxs, ...] else: self.points = data_batches[test_idxs, ...] self.labels = labels_batches[test_idxs, ...] def __getitem__(self, idx): pt_idxs = np.arange(0, self.num_points) np.random.shuffle(pt_idxs) current_points = torch.from_numpy(self.points[idx, pt_idxs].copy()).float() current_labels = torch.from_numpy(self.labels[idx, pt_idxs].copy()).long() return current_points, current_labels def __len__(self): return int(self.points.shape[0] * self.data_precent) def set_num_points(self, pts): self.num_points = pts def randomize(self): pass if __name__ == "__main__": dset = Indoor3DSemSeg(16, "./", train=True) print(dset[0]) print(len(dset)) dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True) for i, data in enumerate(dloader, 0): inputs, labels = data if i == len(dloader) - 1: print(inputs.size())
the-stack_0_8212
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Learning rate decay functions.""" import math from megatron import print_rank_0 class AnnealingLR(object): """Anneals the learning rate.""" def __init__(self, optimizer, start_lr, warmup_iter, total_iters, decay_style, last_iter, min_lr=0.0, use_checkpoint_lr_scheduler=True, override_lr_scheduler=False): # Class values. self.optimizer = optimizer self.start_lr = start_lr self.min_lr = min_lr self.warmup_iter = warmup_iter self.num_iters = last_iter self.end_iter = total_iters assert self.end_iter > 0 self.decay_style = decay_style self.override_lr_scheduler = override_lr_scheduler self.use_checkpoint_lr_scheduler = use_checkpoint_lr_scheduler if self.override_lr_scheduler: assert not self.use_checkpoint_lr_scheduler, 'both override and '\ 'use-checkpoint are set.' # Set the learning rate self.step(self.num_iters) print_rank_0('> learning rate decay style: {}'.format(self.decay_style)) def get_lr(self): """Learning rate decay functions from: https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" num_iters_ = min(self.num_iters, self.end_iter - self.warmup_iter) # Warmup. if self.warmup_iter > 0 and self.num_iters <= self.warmup_iter: return float(self.start_lr) * num_iters_ / self.warmup_iter num_iters_ = num_iters_ - self.warmup_iter if self.decay_style == 'linear': lr = self.start_lr * (self.end_iter - num_iters_) / self.end_iter elif self.decay_style == 'cosine': lr = self.start_lr / 2.0 * (math.cos( math.pi * num_iters_ / self.end_iter) + 1) elif self.decay_style == 'exponential': # exp(-0.693) = 1/2 lr = self.start_lr * math.exp(-0.693 * num_iters_ / self.end_iter) else: lr = self.start_lr return max(lr, self.min_lr) def step(self, step_num=None): """Set lr for all parameters groups.""" if step_num is None: step_num = self.num_iters + 1 self.num_iters = step_num new_lr = self.get_lr() for group in self.optimizer.param_groups: group['lr'] = new_lr def state_dict(self): state_dict = { 'start_lr': self.start_lr, 'warmup_iter': self.warmup_iter, 'num_iters': self.num_iters, 'decay_style': self.decay_style, 'end_iter': self.end_iter, 'min_lr': self.min_lr } return state_dict def _check_and_set(self, cls_value, sd_value, name): """Auxiliary function for checking the values in the checkpoint and setting them.""" if self.override_lr_scheduler: print_rank_0(' > overriding {} value to {}'.format(name, cls_value)) return cls_value if not self.use_checkpoint_lr_scheduler: assert cls_value == sd_value, 'AnnealingLR: class input value' \ 'and checkpoint values for {} do not match'.format(name) print_rank_0(' > using checkpoint value {} for {}'.format(sd_value, name)) return sd_value def load_state_dict(self, sd): self.start_lr = self._check_and_set(self.start_lr, sd['start_lr'], 'learning rate') self.min_lr = self._check_and_set(self.min_lr, sd['min_lr'], 'minimum learning rate') self.warmup_iter = self._check_and_set(self.warmup_iter, sd['warmup_iter'], 'warmup iterations') self.end_iter = 
self._check_and_set(self.end_iter, sd['end_iter'], 'total number of iterations') self.decay_style = self._check_and_set(self.decay_style, sd['decay_style'], 'decay style') self.num_iters = sd['num_iters'] self.step(self.num_iters)
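
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original Megatron module): it shows how
# this scheduler is typically driven once per training iteration.  The
# optimizer construction and all numeric values below are illustrative
# assumptions, not Megatron defaults.
#
#   import torch
#   params = [torch.nn.Parameter(torch.zeros(1))]
#   optimizer = torch.optim.SGD(params, lr=1.0e-4)
#   lr_scheduler = AnnealingLR(optimizer,
#                              start_lr=1.0e-4,
#                              warmup_iter=100,
#                              total_iters=10000,
#                              decay_style='cosine',
#                              last_iter=0,
#                              min_lr=1.0e-5)
#   for iteration in range(10000):
#       # ... forward / backward / optimizer.step() ...
#       lr_scheduler.step()   # writes the new lr into optimizer.param_groups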
the-stack_0_8214
import random
import numpy as np
import matplotlib.pyplot as plt

def plus_minus_one_generator():    # function that returns +1 or -1, with
    x = [-1,1,-1,1,-1,1]           # 50% probability for each.
    return random.choice(x)

def main():
    i = 0
    u = []
    while(i < 50):
        u.append(plus_minus_one_generator()) ## builds the requested u
        i+=1
    print(u, end = "\n\n")
    plt.ion()
    plt.plot(u) ##plots u
    plt.show()

    c = [1,0.7,-0.3]
    h = [0.9,-0.5,0.5,-0.4,0.3,-0.3,0.2,-0.1]

    y = np.convolve(c,u) ## y = c*u
    print(y, end = "\n\n")
    plt.ion()
    plt.plot(y) ##plots y
    plt.show()

    z = np.convolve(h,y) ## z = h*y
    print(z)
    plt.ion()
    plt.plot(z) ##plots z
    plt.show()
    plt.xlabel('U (blue), Y (green) and Z (orange)') # setting the appropriate
                                                     # labels
    ##e1 = [1,0,0,0,0,0,0,0] # test of the identity property e1*u = u,
    ##u = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]  # approximately
    ##print(np.convolve(e1,u))
    return

main()
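
# Hedged addendum (not in the original script): a quick numerical check of the
# property sketched in the commented-out block above.  Convolving with the unit
# impulse e1 = [1, 0, 0, ...] leaves the signal unchanged, and convolution is
# associative, so h*(c*u) equals (h*c)*u up to floating-point error.  The
# helper name below is an addition, not something the original author defined.
def _convolution_property_check():
    u = [plus_minus_one_generator() for _ in range(50)]
    c = [1, 0.7, -0.3]
    h = [0.9, -0.5, 0.5, -0.4, 0.3, -0.3, 0.2, -0.1]
    e1 = [1, 0, 0, 0, 0, 0, 0, 0]
    # identity: the full convolution with the impulse reproduces u in its
    # first len(u) samples
    assert np.allclose(np.convolve(e1, u)[:len(u)], u)
    # associativity: h * (c * u) == (h * c) * u
    assert np.allclose(np.convolve(h, np.convolve(c, u)),
                       np.convolve(np.convolve(h, c), u))

# _convolution_property_check()   # uncomment to run the check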
the-stack_0_8217
import numpy as np import os import tensorflow as tf from PIL import Image import utility as Utility from make_mnist_datasets import Make_mnist_datasets #global variants batchsize = 100 data_size = 6000 noise_num = 100 class_num = 10 n_epoch = 1000 l2_norm_lambda = 0.001 alpha_P = 0.5 alpha_pseudo = 0.1 alpha_apply_thr = 200 keep_prob_rate = 0.5 mnist_file_name = ["mnist_train_img.npy", "mnist_train_label.npy", "mnist_test_img.npy", "mnist_test_label.npy"] board_dir_name = "data27" #directory for tensorboard seed = 1234 np.random.seed(seed=seed) # adam_b1_d = 0.5 # adam_b1_c = 0.5 # adam_b1_g = 0.5 out_image_dir = './out_images_tripleGAN' #output image file out_model_dir = './out_models_tripleGAN' #output model file try: os.mkdir(out_image_dir) os.mkdir(out_model_dir) os.mkdir('./out_images_Debug') #for debug except: # print("mkdir error") pass make_mnist = Make_mnist_datasets(mnist_file_name, alpha_P) def leaky_relu(x, alpha): return tf.nn.relu(x) - alpha * tf.nn.relu(-x) def gaussian_noise(input, std): #used at discriminator noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=std, dtype=tf.float32, seed=seed) return input + noise #generator------------------------------------------------------------------ def generator(y, z, reuse=False): with tf.variable_scope('generator', reuse=reuse): wg1 = tf.get_variable('wd1', [class_num + noise_num, 500], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bg1 = tf.get_variable('gb1', [500], initializer=tf.constant_initializer(0.0)) scaleg2 = tf.get_variable('sg2', [500], initializer=tf.constant_initializer(1.0)) betag2 = tf.get_variable('beg2', [500], initializer=tf.constant_initializer(0.0)) wg3 = tf.get_variable('wg3', [500, 500], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bg3 = tf.get_variable('bg3', [500], initializer=tf.constant_initializer(0.0)) scaleg4 = tf.get_variable('sg4', [500], initializer=tf.constant_initializer(1.0)) betag4 = tf.get_variable('beg4', [500], initializer=tf.constant_initializer(0.0)) wg5 = tf.get_variable('wg5', [500, 784], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bg5 = tf.get_variable('bg5', [784], initializer=tf.constant_initializer(0.0)) #concat label and noise concat0 = tf.concat([y, z], axis=1, name='G_concat0') #layer1 linear fc1 = tf.matmul(concat0, wg1, name='G_matmul1') + bg1 #softplus function sp1 = tf.log(tf.clip_by_value(1 + tf.exp(fc1), 1e-10, 1e+30), name='G_softmax1') #layer2 batch normalization batch_mean2, batch_var2 = tf.nn.moments(sp1, [0]) bn2 = tf.nn.batch_normalization(sp1, batch_mean2, batch_var2, betag2, scaleg2 , 0.0001, name='G_BN2') #layer3 linear fc3 = tf.matmul(bn2, wg3, name='G_matmul3') + bg3 #softplus function sp3 = tf.log(tf.clip_by_value(1 + tf.exp(fc3), 1e-10, 1e+30), name='G_softmax3') #layer4 batch normalization batch_mean4, batch_var4 = tf.nn.moments(sp3, [0]) bn4 = tf.nn.batch_normalization(sp3, batch_mean4, batch_var4, betag4, scaleg4 , 0.0001, name='G_BN4') #layer5 linear fc5 = tf.matmul(bn4, wg5, name='G_matmul5') + bg5 #sigmoid function sig5 = tf.nn.sigmoid(fc5, name='G_sigmoid5') #reshape to 28x28 image x_gen = tf.reshape(sig5, [-1, 28, 28, 1]) return x_gen, y #discriminator----------------------------------------------------------------- def discriminator(x, y, reuse=False): with tf.variable_scope('discriminator', reuse=reuse): wd1 = tf.get_variable('wd1', [794, 1000], initializer=tf.random_normal_initializer 
(mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bd1 = tf.get_variable('bd1', [1000], initializer=tf.constant_initializer(0.0)) wd2 = tf.get_variable('wd2', [1000, 500], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bd2 = tf.get_variable('bd2', [500], initializer=tf.constant_initializer(0.0)) wd3 = tf.get_variable('wd3', [500, 250], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bd3 = tf.get_variable('bd3', [250], initializer=tf.constant_initializer(0.0)) wd4 = tf.get_variable('wd4', [250, 250], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bd4 = tf.get_variable('bd4', [250], initializer=tf.constant_initializer(0.0)) wd5 = tf.get_variable('wd5', [250, 250], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bd5 = tf.get_variable('bd5', [250], initializer=tf.constant_initializer(0.0)) wd6 = tf.get_variable('wd6', [250, 1], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bd6 = tf.get_variable('bd6', [1], initializer=tf.constant_initializer(0.0)) x_reshape = tf.reshape(x, [-1, 28 * 28]) # concat image and label concat0 = tf.concat([x_reshape, y], axis=1, name='D_concat0') # layer1 linear #gaussian noise gn1 = gaussian_noise(concat0, 0.3) #fully-connected fc1 = tf.matmul(gn1, wd1, name='D_matmul1') + bd1 # leakyReLU function lr1 = leaky_relu(fc1, alpha=0.2) # layer2 linear #gaussian noise gn2 = gaussian_noise(lr1, 0.5) #fully-connected fc2 = tf.matmul(gn2, wd2, name='D_matmul2') + bd2 # leakyReLU function lr2 = leaky_relu(fc2, alpha=0.2) # layer3 linear #gaussian noise gn3 = gaussian_noise(lr2, 0.5) #fully-connected fc3 = tf.matmul(gn3, wd3, name='D_matmul3') + bd3 # leakyReLU function lr3 = leaky_relu(fc3, alpha=0.2) # layer4 linear #gaussian noise gn4 = gaussian_noise(lr3, 0.5) #fully-connected fc4 = tf.matmul(gn4, wd4, name='D_matmul4') + bd4 # leakyReLU function lr4 = leaky_relu(fc4, alpha=0.2) # layer5 linear #gaussian noise gn5 = gaussian_noise(lr4, 0.5) #fully-connected fc5 = tf.matmul(gn5, wd5, name='D_matmul5') + bd5 # leakyReLU function lr5 = leaky_relu(fc5, alpha=0.2) # layer6 linear #gaussian noise gn6 = gaussian_noise(lr5, 0.5) #fully-connected fc6 = tf.matmul(gn6, wd6, name='D_matmul6') + bd6 # softplus function out_dis = tf.nn.sigmoid(fc6, name='D_sigmoid') norm_L2 = tf.nn.l2_loss(wd1) + tf.nn.l2_loss(wd2) + tf.nn.l2_loss(wd3) + tf.nn.l2_loss(wd4) + tf.nn.l2_loss(wd5) \ + tf.nn.l2_loss(wd6) return out_dis, norm_L2 #classifier----------------------------------------------------------------- def classifier(xc, keep_prob, reuse=False): with tf.variable_scope('classifier', reuse=reuse): wc1 = tf.get_variable('wc1', [5, 5, 1, 32], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bc1 = tf.get_variable('bc1', [32], initializer=tf.constant_initializer(0.0)) wc2 = tf.get_variable('wc2', [3, 3, 32, 64], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bc2 = tf.get_variable('bc2', [64], initializer=tf.constant_initializer(0.0)) wc3 = tf.get_variable('wc3', [3, 3, 64, 64], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bc3 = tf.get_variable('bc3', [64], initializer=tf.constant_initializer(0.0)) wc4 = tf.get_variable('wc4', [3, 3, 64, 128], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), 
dtype=tf.float32) bc4 = tf.get_variable('bc4', [128], initializer=tf.constant_initializer(0.0)) wc5 = tf.get_variable('wc5', [3, 3, 128, 128], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bc5 = tf.get_variable('bc5', [128], initializer=tf.constant_initializer(0.0)) wc6 = tf.get_variable('wc6', [128, 10], initializer=tf.random_normal_initializer (mean=0.0, stddev=0.05, seed=seed), dtype=tf.float32) bc6 = tf.get_variable('bc6', [10], initializer=tf.constant_initializer(0.0)) #layer1 convolution conv1 = tf.nn.conv2d(xc, wc1, strides=[1, 1, 1, 1], padding="SAME", name='C_conv1') + bc1 # relu function conv1_relu = tf.nn.relu(conv1) #max pooling conv1_pool = tf.nn.max_pool(conv1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") #drop out conv1_drop = tf.nn.dropout(conv1_pool, keep_prob) #layer2 convolution conv2 = tf.nn.conv2d(conv1_drop, wc2, strides=[1, 1, 1, 1], padding="SAME", name='C_conv2') + bc2 # relu function conv2_relu = tf.nn.relu(conv2) #layer3 convolution conv3 = tf.nn.conv2d(conv2_relu, wc3, strides=[1, 1, 1, 1], padding="SAME", name='C_conv3') + bc3 # relu function conv3_relu = tf.nn.relu(conv3) #max pooling conv3_pool = tf.nn.max_pool(conv3_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") #drop out conv3_drop = tf.nn.dropout(conv3_pool, keep_prob) #layer4 convolution conv4 = tf.nn.conv2d(conv3_drop, wc4, strides=[1, 1, 1, 1], padding="SAME", name='C_conv4') + bc4 # relu function conv4_relu = tf.nn.relu(conv4) #layer5 convolution conv5 = tf.nn.conv2d(conv4_relu, wc5, strides=[1, 1, 1, 1], padding="SAME", name='C_conv5') + bc5 # relu function conv5_relu = tf.nn.relu(conv5) # conv6 = tf.nn.conv2d(conv5_relu, wc6, strides=[1, 1, 1, 1], padding="SAME") + bc6 # global average pooling.... 
reduce mean ap5 = tf.reduce_mean(conv5_relu, axis=[1, 2], name='C_global_average') #layer6 full-connected fc6 = tf.matmul(ap5, wc6, name='C_matmul6') + bc6 #softmax yc = tf.nn.softmax(fc6, name='C_softmax') # tf.summary.histogram("Cconv1", conv1) # tf.summary.histogram("Cconv2", conv2) # tf.summary.histogram("Cconv3", conv3) # tf.summary.histogram("Cconv4", conv4) # tf.summary.histogram("Cconv5", conv5) # tf.summary.histogram("Cap5", ap5) tf.summary.histogram("Cfc6", fc6) tf.summary.histogram("yc", yc) return xc, yc # placeholder yg_ = tf.placeholder(tf.float32, [None, class_num], name='yg_') #label to generator z_ = tf.placeholder(tf.float32, [None, noise_num], name='z_') #noise to generator xc1_ = tf.placeholder(tf.float32, [None, 28, 28, 1], name='xc1_') #labeled image to classifier xc2_ = tf.placeholder(tf.float32, [None, 28, 28, 1], name='xc2_') #unlabeled image to classifier yd_ = tf.placeholder(tf.float32, [None, class_num], name='yd_') #label to discriminator xd_ = tf.placeholder(tf.float32, [None, 28, 28, 1], name='xd_') #labeled image to discriminator d_dis_g_ = tf.placeholder(tf.float32, [None, 1], name='d_dis_g_') #target of discriminator related to generator d_dis_r_ = tf.placeholder(tf.float32, [None, 1], name='d_dis_r_') #target of discriminator related to real image d_dis_c_ = tf.placeholder(tf.float32, [None, 1], name='d_dis_c_') #target of discriminator related to classifier yc1_ = tf.placeholder(tf.float32, [None, class_num], name='yc1_') #target label of classifier related to real image alpha_p_flag_ = tf.placeholder(tf.float32, name='alpha_p_flag_') #(0,1) apply alpha pseudo or not keep_prob_ = tf.placeholder(tf.float32, name='keep_prob_') #dropout rate # stream around generator x_gen, y_gen = generator(yg_, z_, reuse=False) # stream around classifier x_cla_0, y_cla_0 = classifier(x_gen, keep_prob_, reuse=False) # from generator x_cla_1, y_cla_1 = classifier(xc1_, keep_prob_, reuse=True) # real image labeled x_cla_2, y_cla_2 = classifier(xc2_, keep_prob_, reuse=True) # real image unlabeled # loss_RP = - tf.reduce_mean(y_gen * tf.log(y_cla_0)) #loss in case generated image # loss_RL = - tf.reduce_mean(yc1_ * tf.log(y_cla_1)) #loss in case real image loss_RP = - tf.reduce_mean(y_gen * tf.log(tf.clip_by_value(y_cla_0, 1e-10, 1e+30)), name='Loss_RP') #loss in case generated image loss_RL = - tf.reduce_mean(yc1_ * tf.log(tf.clip_by_value(y_cla_1, 1e-10, 1e+30)), name='Loss_RL') #loss in case real image #stream around discriminator out_dis_g, normL2_1 = discriminator(x_gen, y_gen, reuse=False) #from generator out_dis_r, normL2_2 = discriminator(xd_, yd_, reuse=True) #real image and label out_dis_c, normL2_3 = discriminator(x_cla_2, y_cla_2, reuse=True) #from classifier loss_dis_g = tf.reduce_mean(tf.square(out_dis_g - d_dis_g_), name='Loss_dis_gen') #loss related to generator loss_dis_r = tf.reduce_mean(tf.square(out_dis_r - d_dis_r_), name='Loss_dis_rea') #loss related to real imaeg loss_dis_c = tf.reduce_mean(tf.square(out_dis_c - d_dis_c_), name='Loss_dis_cla') #loss related to classifier norm_L2 = normL2_1 + normL2_2 + normL2_3 #total loss of discriminator loss_dis_total = loss_dis_r + alpha_P * loss_dis_c + (1 - alpha_P) * loss_dis_g + l2_norm_lambda * norm_L2 #total loss of classifier loss_cla_total = alpha_P * loss_dis_c + loss_RL + alpha_p_flag_ * alpha_pseudo * loss_RP #total loss of generator loss_gen_total = (1 - alpha_P) * loss_dis_g # tf.summary.scalar('loss_dis_total', loss_dis_total) # tf.summary.histogram("wc1", wc1) # # tf.summary.histogram("wc2", wc2) # # 
tf.summary.histogram("wc3", wc3) # # tf.summary.histogram("wc4", wc4) # # tf.summary.histogram("wc5", wc5) # # tf.summary.histogram("wc6", wc6) # # tf.summary.histogram("bc1", bc1) # # tf.summary.histogram("bc2", bc2) # # tf.summary.histogram("bc3", bc3) # # tf.summary.histogram("bc4", bc4) # # tf.summary.histogram("bc5", bc5) # tf.summary.histogram("bc6", bc6) tf.summary.scalar('loss_cla_total', loss_cla_total) tf.summary.scalar('loss_dis_c', loss_dis_c) tf.summary.scalar('loss_RL', loss_RL) tf.summary.scalar('loss_RP', loss_RP) # tf.summary.scalar('loss_gen_total', loss_gen_total) merged = tf.summary.merge_all() # t_vars = tf.trainable_variables() g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="generator") d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="discriminator") c_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="classifier") # d_vars = [var for var in t_vars if 'd' in var.name] # g_vars = [var for var in t_vars if 'g' in var.name] # c_vars = [var for var in t_vars if 'c' in var.name] train_dis = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5).minimize(loss_dis_total, var_list=d_vars # var_list=[wd1, wd2, wd3, wd4, wd5, wd6, bd1, bd2, bd3, bd4, bd5, bd6] , name='Adam_dis') train_gen = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5).minimize(loss_gen_total, var_list=g_vars # var_list=[wg1, wg3, wg5, bg1, bg3, bg5, betag2, scaleg2, betag4, scaleg4] , name='Adam_gen') train_cla = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5).minimize(loss_cla_total, var_list=c_vars # var_list=[wc1, wc2, wc3, wc4, wc5, wc6, bc1, bc2, bc3, bc4, bc5, bc6] , name='Adam_cla') # train_cla = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5).minimize(loss_cla_total, # var_list=[wc1, wc2, wc3, wc4, wc5, wc6, bc1, bc2, bc3, bc4, bc5, bc6, # wg1, wg3, wg5, bg1, bg3, bg5, betag2, scaleg2, betag4, scaleg4] # , name='Adam_cla') sess = tf.Session() sess.run(tf.global_variables_initializer()) summary_writer = tf.summary.FileWriter(board_dir_name, sess.graph) #training loop for epoch in range(0, n_epoch): sum_loss_gen = np.float32(0) sum_loss_dis = np.float32(0) sum_loss_dis_r = np.float32(0) sum_loss_dis_c0 = np.float32(0) sum_loss_dis_g0 = np.float32(0) sum_loss_cla = np.float32(0) sum_accu_cla = np.float32(0) sum_loss_dis_c1 = np.float32(0) sum_loss_RL = np.float32(0) sum_loss_RP = np.float32(0) len_img_real = make_mnist.make_data_for_1_epoch() #debug # print("make_mnist.img_real_1epoch.shape = ", make_mnist.img_real_1epoch.shape) # print("make_mnist.img_cla_1epoch.shape = ", make_mnist.img_cla_1epoch.shape) for i in range(0, len_img_real, batchsize): img_real_batch, img_cla_batch, label_real_batch = make_mnist.get_data_for_1_batch(i, batchsize, alpha_P) #debug # if epoch == 0 and i == 0: # make_mnist.print_img_and_label(img_real_batch, label_real_batch, 7) #cal each batchsize len_real_batch = len(img_real_batch) len_cla_batch = len(img_cla_batch) len_gen_batch = int(len(img_real_batch) * alpha_P) z = np.random.uniform(0, 1, len_gen_batch * noise_num) z = z.reshape(-1, noise_num).astype(np.float32) label_gen_int = np.random.randint(0, class_num, len_gen_batch) label_gen = make_mnist.convert_to_10class_(label_gen_int) d_dis_g_1_ = np.array([1.0], dtype=np.float32).reshape(1, 1) d_dis_g_1 = np.tile(d_dis_g_1_, (len_gen_batch, 1)) d_dis_g_0_ = np.array([0.0], dtype=np.float32).reshape(1, 1) d_dis_g_0 = np.tile(d_dis_g_0_, (len_gen_batch, 1)) d_dis_r_1 = np.array([1.0], dtype=np.float32).reshape(1, 1) d_dis_r = np.tile(d_dis_r_1, 
(len_real_batch, 1)) d_dis_c_1_ = np.array([1.0], dtype=np.float32).reshape(1, 1) d_dis_c_1 = np.tile(d_dis_c_1_, (len_cla_batch, 1)) d_dis_c_0_ = np.array([0.0], dtype=np.float32).reshape(1, 1) d_dis_c_0 = np.tile(d_dis_c_0_, (len_cla_batch, 1)) #debug # d_vars_ = sess.run(d_vars, feed_dict={z_:z, yg_:label_gen, yd_: label_real_batch, xd_: img_real_batch, # xc2_: img_cla_batch, d_dis_g_: d_dis_g_0, d_dis_r_: d_dis_r_1, # d_dis_c_:d_dis_c_0, keep_prob_:keep_prob_rate}) # # print("d_vars =", d_vars) #train discriminator sess.run(train_dis, feed_dict={z_:z, yg_:label_gen, yd_: label_real_batch, xd_: img_real_batch, xc2_: img_cla_batch, d_dis_g_: d_dis_g_0, d_dis_r_: d_dis_r_1, d_dis_c_:d_dis_c_0, keep_prob_:keep_prob_rate}) #train classifier if epoch > alpha_apply_thr: sess.run(train_cla, feed_dict={z_:z, yg_:label_gen, xc1_: img_real_batch, xc2_: img_cla_batch, yc1_: label_real_batch, d_dis_c_: d_dis_c_1,keep_prob_:keep_prob_rate, alpha_p_flag_:1.0}) else: sess.run(train_cla, feed_dict={z_: z, yg_: label_gen, xc1_: img_real_batch, xc2_: img_cla_batch, yc1_: label_real_batch, d_dis_c_: d_dis_c_1, keep_prob_: keep_prob_rate, alpha_p_flag_: 0.0}) #train generator sess.run(train_gen, feed_dict={z_: z, yg_: label_gen, d_dis_g_: d_dis_g_1}) loss_gen_total_ = sess.run(loss_gen_total, feed_dict={z_:z, yg_:label_gen, d_dis_g_: d_dis_g_1}) loss_dis_total_, loss_dis_r_, loss_dis_g_0, loss_dis_c_0 = sess.run([loss_dis_total, loss_dis_r, loss_dis_g, loss_dis_c], feed_dict={z_:z, yg_:label_gen, yd_: label_real_batch, xd_: img_real_batch, xc2_: img_cla_batch, d_dis_g_: d_dis_g_0, d_dis_r_: d_dis_r_1, d_dis_c_:d_dis_c_0, keep_prob_:1.0}) loss_cla_total_, loss_dis_c_1, loss_RL_, loss_RP_ = sess.run([loss_cla_total, loss_dis_c, loss_RL, loss_RP], feed_dict={z_:z, yg_:label_gen, xc1_: img_real_batch, xc2_: img_cla_batch, yc1_: label_real_batch, d_dis_c_: d_dis_c_1, keep_prob_:1.0, alpha_p_flag_: 0.0}) #for tensorboard merged_ = sess.run(merged, feed_dict={z_:z, yg_:label_gen, xc1_: img_real_batch, xc2_: img_cla_batch,yc1_: label_real_batch, d_dis_c_: d_dis_c_1, keep_prob_:1.0, alpha_p_flag_: 0.0}) summary_writer.add_summary(merged_, epoch) sum_loss_gen += loss_gen_total_ sum_loss_dis += loss_dis_total_ sum_loss_dis_r += loss_dis_r_ sum_loss_dis_c0 += loss_dis_c_0 sum_loss_dis_g0 += loss_dis_g_0 sum_loss_cla += loss_cla_total_ sum_loss_dis_c1 += loss_dis_c_1 sum_loss_RL += loss_RL_ sum_loss_RP += loss_RP_ print("-----------------------------------------------------") print("epoch =", epoch , ", Total Loss of G =", sum_loss_gen, ", Total Loss of D =", sum_loss_dis, ", Total Loss of C =", sum_loss_cla) print("Discriminator: Loss Real =", sum_loss_dis_r, ", Loss C =", sum_loss_dis_c0, ", Loss D =", sum_loss_dis_g0,) print("Classifier: Loss adv =", sum_loss_dis_c1, ", Loss RL =", sum_loss_RL, ", Loss RP =", sum_loss_RP,) if epoch % 10 == 0: sample_num_h = 10 sample_num = sample_num_h ** 2 z_test = np.random.uniform(0, 1, sample_num_h * noise_num).reshape(1, sample_num_h, noise_num) z_test = np.tile(z_test, (sample_num_h, 1, 1)) z_test = z_test.reshape(-1, sample_num).astype(np.float32) label_gen_int = np.arange(10).reshape(10, 1).astype(np.float32) label_gen_int = np.tile(label_gen_int, (1, 10)).reshape(sample_num) label_gen_test = make_mnist.convert_to_10class_(label_gen_int) gen_images = sess.run(x_gen, feed_dict={z_:z_test, yg_:label_gen_test}) Utility.make_output_img(gen_images, sample_num_h, out_image_dir, epoch) # z_only_1 = np.random.uniform(0, 1, noise_num).reshape(1, noise_num) # label_gen_only_1 = 
np.array([4]).reshape(1, 1).astype(np.float32) # label_gen_only_1_class = make_mnist.convert_to_10class_(label_gen_only_1) # gen_image_1 = sess.run(x_gen, feed_dict={z_:z_only_1, yg_:label_gen_only_1_class}) # # Utility.make_1_img(gen_image_1)
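
# Hedged note (not in the original script): out_model_dir is created near the
# top but nothing is ever written to it.  With this TF1-style graph a
# checkpoint could be saved inside the epoch loop with something like the
# sketch below; the save interval and checkpoint name are assumptions.
#
#   saver = tf.train.Saver()
#   if epoch % 100 == 0:
#       saver.save(sess, os.path.join(out_model_dir, 'triple_gan'),
#                  global_step=epoch)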
the-stack_0_8219
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Network Hosts are responsible for allocating IPs and setting up network. There are multiple backend drivers that handle specific types of networking topologies. All of the network commands are issued to a subclass of :class:`NetworkManager`. """ import collections import datetime import functools import math import re import uuid import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import six from nova import context from nova import exception from nova.i18n import _, _LI, _LE, _LW from nova import ipv6 from nova import manager from nova.network import api as network_api from nova.network import driver from nova.network import floating_ips from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova.network.security_group import openstack_driver from nova import objects from nova.objects import base as obj_base from nova.objects import quotas as quotas_obj from nova import servicegroup from nova import utils LOG = logging.getLogger(__name__) network_opts = [ cfg.StrOpt('flat_network_bridge', help='Bridge for simple network instances'), cfg.StrOpt('flat_network_dns', default='8.8.4.4', help='DNS server for simple network'), cfg.BoolOpt('flat_injected', default=False, help='Whether to attempt to inject network setup into guest'), cfg.StrOpt('flat_interface', help='FlatDhcp will bridge into this interface if set'), cfg.IntOpt('vlan_start', default=100, min=1, max=4094, help='First VLAN for private networks'), cfg.StrOpt('vlan_interface', help='VLANs will bridge into this interface if set'), cfg.IntOpt('num_networks', default=1, help='Number of networks to support'), cfg.StrOpt('vpn_ip', default='$my_ip', help='Public IP for the cloudpipe VPN servers'), cfg.IntOpt('vpn_start', default=1000, help='First Vpn port for private networks'), cfg.IntOpt('network_size', default=256, help='Number of addresses in each private subnet'), cfg.StrOpt('fixed_range_v6', default='fd00::/48', help='Fixed IPv6 address block'), cfg.StrOpt('gateway', help='Default IPv4 gateway'), cfg.StrOpt('gateway_v6', help='Default IPv6 gateway'), cfg.IntOpt('cnt_vpn_clients', default=0, help='Number of addresses reserved for vpn clients'), cfg.IntOpt('fixed_ip_disassociate_timeout', default=600, help='Seconds after which a deallocated IP is disassociated'), cfg.IntOpt('create_unique_mac_address_attempts', default=5, help='Number of attempts to create unique mac address'), cfg.BoolOpt('fake_call', default=False, 
help='If True, skip using the queue and make local calls'), cfg.BoolOpt('teardown_unused_network_gateway', default=False, help='If True, unused gateway devices (VLAN and bridge) are ' 'deleted in VLAN network mode with multi hosted ' 'networks'), cfg.BoolOpt('force_dhcp_release', default=True, help='If True, send a dhcp release on instance termination'), cfg.BoolOpt('update_dns_entries', default=False, help='If True, when a DNS entry must be updated, it sends a ' 'fanout cast to all network hosts to update their DNS ' 'entries in multi host mode'), cfg.IntOpt("dns_update_periodic_interval", default=-1, help='Number of seconds to wait between runs of updates to DNS ' 'entries.'), cfg.StrOpt('dhcp_domain', default='novalocal', help='Domain to use for building the hostnames'), cfg.StrOpt('l3_lib', default='nova.network.l3.LinuxNetL3', help="Indicates underlying L3 management library"), ] CONF = cfg.CONF CONF.register_opts(network_opts) CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('network_topic', 'nova.network.rpcapi') CONF.import_opt('fake_network', 'nova.network.linux_net') CONF.import_opt('share_dhcp_address', 'nova.objects.network') CONF.import_opt('network_device_mtu', 'nova.objects.network') class RPCAllocateFixedIP(object): """Mixin class originally for FlatDCHP and VLAN network managers. used since they share code to RPC.call allocate_fixed_ip on the correct network host to configure dnsmasq """ servicegroup_api = None def _allocate_fixed_ips(self, context, instance_id, host, networks, **kwargs): """Calls allocate_fixed_ip once for each network.""" green_threads = [] vpn = kwargs.get('vpn') requested_networks = kwargs.get('requested_networks') addresses_by_network = {} if requested_networks is not None: for request in requested_networks: addresses_by_network[request.network_id] = request.address for network in networks: if 'uuid' in network and network['uuid'] in addresses_by_network: address = addresses_by_network[network['uuid']] else: address = None # NOTE(vish): if we are not multi_host pass to the network host # NOTE(tr3buchet): but if we are, host came from instance.host if not network['multi_host']: host = network['host'] # NOTE(vish): if there is no network host, set one if host is None: host = self.network_rpcapi.set_network_host(context, network) if host != self.host: # need to call allocate_fixed_ip to correct network host green_threads.append(utils.spawn( self.network_rpcapi._rpc_allocate_fixed_ip, context, instance_id, network['id'], address, vpn, host)) else: # i am the correct host, run here self.allocate_fixed_ip(context, instance_id, network, vpn=vpn, address=address) # wait for all of the allocates (if any) to finish for gt in green_threads: gt.wait() def _rpc_allocate_fixed_ip(self, context, instance_id, network_id, **kwargs): """Sits in between _allocate_fixed_ips and allocate_fixed_ip to perform network lookup on the far side of rpc. 
""" network = self._get_network_by_id(context, network_id) return self.allocate_fixed_ip(context, instance_id, network, **kwargs) def deallocate_fixed_ip(self, context, address, host=None, teardown=True, instance=None): """Call the superclass deallocate_fixed_ip if i'm the correct host otherwise call to the correct host """ fixed_ip = objects.FixedIP.get_by_address( context, address, expected_attrs=['network']) network = fixed_ip.network # NOTE(vish): if we are not multi_host pass to the network host # NOTE(tr3buchet): but if we are, host came from instance.host if not network.multi_host: host = network.host if host == self.host: # NOTE(vish): deallocate the fixed ip locally return super(RPCAllocateFixedIP, self).deallocate_fixed_ip(context, address, instance=instance) if network.multi_host: service = objects.Service.get_by_host_and_binary( context, host, 'nova-network') if not service or not self.servicegroup_api.service_is_up(service): # NOTE(vish): deallocate the fixed ip locally but don't # teardown network devices return super(RPCAllocateFixedIP, self).deallocate_fixed_ip( context, address, teardown=False, instance=instance) self.network_rpcapi.deallocate_fixed_ip(context, address, host, instance) class NetworkManager(manager.Manager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. host management: hosts configure themselves for networks they are assigned to in the table upon startup. If there are networks in the table which do not have hosts, those will be filled in and have hosts configured as the hosts pick them up one at time during their periodic task. The one at a time part is to flatten the layout to help scale """ target = messaging.Target(version='1.16') # If True, this manager requires VIF to create a bridge. SHOULD_CREATE_BRIDGE = False # If True, this manager requires VIF to create VLAN tag. SHOULD_CREATE_VLAN = False # if True, this manager leverages DHCP DHCP = False timeout_fixed_ips = True required_create_args = [] def __init__(self, network_driver=None, *args, **kwargs): self.driver = driver.load_network_driver(network_driver) self.instance_dns_manager = importutils.import_object( CONF.instance_dns_manager) self.instance_dns_domain = CONF.instance_dns_domain self.floating_dns_manager = importutils.import_object( CONF.floating_ip_dns_manager) self.network_api = network_api.API() self.network_rpcapi = network_rpcapi.NetworkAPI() self.security_group_api = ( openstack_driver.get_openstack_security_group_driver()) self.servicegroup_api = servicegroup.API() l3_lib = kwargs.get("l3_lib", CONF.l3_lib) self.l3driver = importutils.import_object(l3_lib) self.quotas_cls = objects.Quotas super(NetworkManager, self).__init__(service_name='network', *args, **kwargs) @staticmethod def _uses_shared_ip(network): shared = network.get('share_address') or CONF.share_dhcp_address return not network.get('multi_host') or shared @utils.synchronized('get_dhcp') def _get_dhcp_ip(self, context, network_ref, host=None): """Get the proper dhcp address to listen on.""" # NOTE(vish): If we are sharing the dhcp_address then we can just # return the dhcp_server from the database. 
if self._uses_shared_ip(network_ref): return network_ref.get('dhcp_server') or network_ref['gateway'] if not host: host = self.host network_id = network_ref['id'] try: fip = objects.FixedIP.get_by_network_and_host(context, network_id, host) return fip.address except exception.FixedIpNotFoundForNetworkHost: elevated = context.elevated() fip = objects.FixedIP.associate_pool(elevated, network_id, host=host) return fip.address def get_dhcp_leases(self, ctxt, network_ref): """Broker the request to the driver to fetch the dhcp leases.""" LOG.debug('Get DHCP leases for network %s', network_ref['uuid']) return self.driver.get_dhcp_leases(ctxt, network_ref) def init_host(self): """Do any initialization that needs to be run if this is a standalone service. """ # NOTE(vish): Set up networks for which this host already has # an ip address. ctxt = context.get_admin_context() for network in objects.NetworkList.get_by_host(ctxt, self.host): self._setup_network_on_host(ctxt, network) if CONF.update_dns_entries: LOG.debug('Update DNS on network %s for host %s', network['uuid'], self.host) dev = self.driver.get_dev(network) self.driver.update_dns(ctxt, dev, network) LOG.info(_LI('Configured network %(network)s on host %(host)s'), {'network': network['uuid'], 'host': self.host}) @periodic_task.periodic_task def _disassociate_stale_fixed_ips(self, context): if self.timeout_fixed_ips: now = timeutils.utcnow() timeout = CONF.fixed_ip_disassociate_timeout time = now - datetime.timedelta(seconds=timeout) num = objects.FixedIP.disassociate_all_by_timeout(context, self.host, time) if num: LOG.debug('Disassociated %s stale fixed IP(s)', num) def set_network_host(self, context, network_ref): """Safely sets the host of the network.""" # TODO(mriedem): Remove this compat shim when network RPC API version # 1.0 is dropped. if not isinstance(network_ref, obj_base.NovaObject): network_ref = objects.Network._from_db_object( context, objects.Network(), network_ref) LOG.debug('Setting host %s for network %s', self.host, network_ref.uuid, context=context) network_ref.host = self.host network_ref.save() return self.host def _do_trigger_security_group_members_refresh_for_instance(self, instance_id): # NOTE(francois.charlier): the instance may have been deleted already # thus enabling `read_deleted` admin_context = context.get_admin_context(read_deleted='yes') instance = objects.Instance.get_by_uuid(admin_context, instance_id) try: # NOTE(vish): We need to make sure the instance info cache has been # updated with new ip info before we trigger the # security group refresh. This is somewhat inefficient # but avoids doing some dangerous refactoring for a # bug fix. nw_info = self.get_instance_nw_info(admin_context, instance_id, None, None) ic = objects.InstanceInfoCache.new(admin_context, instance_id) ic.network_info = nw_info ic.save(update_cells=False) except exception.InstanceInfoCacheNotFound: pass groups = instance.security_groups group_ids = [group.id for group in groups] self.security_group_api.trigger_members_refresh(admin_context, group_ids) # NOTE(hanlind): This method can be removed in version 2.0 of the RPC API def get_instance_uuids_by_ip_filter(self, context, filters): fixed_ip_filter = filters.get('fixed_ip') ip_filter = re.compile(str(filters.get('ip'))) ipv6_filter = re.compile(str(filters.get('ip6'))) LOG.debug('Get instance uuids by IP filters. Fixed IP filter: %s. ' 'IP filter: %s. 
IPv6 filter: %s', fixed_ip_filter, str(filters.get('ip')), str(filters.get('ip6'))) # NOTE(jkoelker) Should probably figure out a better way to do # this. But for now it "works", this could suck on # large installs. vifs = objects.VirtualInterfaceList.get_all(context) results = [] for vif in vifs: if vif.instance_uuid is None: continue network = self._get_network_by_id(context, vif.network_id) fixed_ipv6 = None if network['cidr_v6'] is not None: fixed_ipv6 = ipv6.to_global(network['cidr_v6'], vif.address, context.project_id) if fixed_ipv6 and ipv6_filter.match(fixed_ipv6): results.append({'instance_uuid': vif.instance_uuid, 'ip': fixed_ipv6}) fixed_ips = objects.FixedIPList.get_by_virtual_interface_id( context, vif.id) for fixed_ip in fixed_ips: if not fixed_ip or not fixed_ip.address: continue if str(fixed_ip.address) == fixed_ip_filter: results.append({'instance_uuid': vif.instance_uuid, 'ip': fixed_ip.address}) continue if ip_filter.match(str(fixed_ip.address)): results.append({'instance_uuid': vif.instance_uuid, 'ip': fixed_ip.address}) continue for floating_ip in fixed_ip.floating_ips: if not floating_ip or not floating_ip.address: continue if ip_filter.match(str(floating_ip.address)): results.append({'instance_uuid': vif.instance_uuid, 'ip': floating_ip.address}) continue return results def _get_networks_for_instance(self, context, instance_id, project_id, requested_networks=None): """Determine & return which networks an instance should connect to.""" # TODO(tr3buchet) maybe this needs to be updated in the future if # there is a better way to determine which networks # a non-vlan instance should connect to if requested_networks is not None and len(requested_networks) != 0: network_uuids = [request.network_id for request in requested_networks] networks = self._get_networks_by_uuids(context, network_uuids) else: try: networks = objects.NetworkList.get_all(context) except exception.NoNetworksFound: return [] # return only networks which are not vlan networks return [network for network in networks if not network.vlan] def allocate_for_instance(self, context, **kwargs): """Handles allocating the various network resources for an instance. 
rpc.called by network_api """ instance_uuid = kwargs['instance_id'] if not uuidutils.is_uuid_like(instance_uuid): instance_uuid = kwargs.get('instance_uuid') host = kwargs['host'] project_id = kwargs['project_id'] rxtx_factor = kwargs['rxtx_factor'] requested_networks = kwargs.get('requested_networks') if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in requested_networks]) vpn = kwargs['vpn'] macs = kwargs['macs'] admin_context = context.elevated() networks = self._get_networks_for_instance(context, instance_uuid, project_id, requested_networks=requested_networks) networks_list = [self._get_network_dict(network) for network in networks] LOG.debug('Networks retrieved for instance: |%s|', networks_list, context=context, instance_uuid=instance_uuid) try: self._allocate_mac_addresses(admin_context, instance_uuid, networks, macs) except Exception: with excutils.save_and_reraise_exception(): # If we fail to allocate any one mac address, clean up all # allocated VIFs objects.VirtualInterface.delete_by_instance_uuid( context, instance_uuid) self._allocate_fixed_ips(admin_context, instance_uuid, host, networks, vpn=vpn, requested_networks=requested_networks) if CONF.update_dns_entries: network_ids = [network['id'] for network in networks] self.network_rpcapi.update_dns(context, network_ids) net_info = self.get_instance_nw_info(admin_context, instance_uuid, rxtx_factor, host) LOG.info(_LI("Allocated network: '%s' for instance"), net_info, instance_uuid=instance_uuid, context=context) return net_info def deallocate_for_instance(self, context, **kwargs): """Handles deallocating various network resources for an instance. 
rpc.called by network_api kwargs can contain fixed_ips to circumvent another db lookup """ # NOTE(francois.charlier): in some cases the instance might be # deleted before the IPs are released, so we need to get deleted # instances too read_deleted_context = context.elevated(read_deleted='yes') if 'instance' in kwargs: instance = kwargs['instance'] instance_uuid = instance.uuid host = instance.host else: instance_id = kwargs['instance_id'] if uuidutils.is_uuid_like(instance_id): instance = objects.Instance.get_by_uuid( read_deleted_context, instance_id) else: instance = objects.Instance.get_by_id( read_deleted_context, instance_id) # NOTE(russellb) in case instance_id was an ID and not UUID instance_uuid = instance.uuid host = kwargs.get('host') try: requested_networks = kwargs.get('requested_networks') if requested_networks: # NOTE(obondarev): Temporary and transitional if isinstance(requested_networks, objects.NetworkRequestList): requested_networks = requested_networks.as_tuples() network_ids = set([net_id for (net_id, ip) in requested_networks]) fixed_ips = [ip for (net_id, ip) in requested_networks if ip] else: fixed_ip_list = objects.FixedIPList.get_by_instance_uuid( read_deleted_context, instance_uuid) network_ids = set([str(fixed_ip.network_id) for fixed_ip in fixed_ip_list]) fixed_ips = [str(ip.address) for ip in fixed_ip_list] except exception.FixedIpNotFoundForInstance: network_ids = set([]) fixed_ips = [] LOG.debug("Network deallocation for instance", context=context, instance_uuid=instance_uuid) # deallocate fixed ips for fixed_ip in fixed_ips: self.deallocate_fixed_ip(context, fixed_ip, host=host, instance=instance) if CONF.update_dns_entries: self.network_rpcapi.update_dns(context, list(network_ids)) # deallocate vifs (mac addresses) objects.VirtualInterface.delete_by_instance_uuid( read_deleted_context, instance_uuid) LOG.info(_LI("Network deallocated for instance (fixed IPs: '%s')"), fixed_ips, context=context, instance_uuid=instance_uuid) @messaging.expected_exceptions(exception.InstanceNotFound) def get_instance_nw_info(self, context, instance_id, rxtx_factor, host, instance_uuid=None, **kwargs): """Creates network info list for instance. called by allocate_for_instance and network_api context needs to be elevated :returns: network info list [(network,info),(network,info)...] 
where network = dict containing pertinent data from a network db object and info = dict containing pertinent networking data """ if not uuidutils.is_uuid_like(instance_id): instance_id = instance_uuid instance_uuid = instance_id LOG.debug('Get instance network info', instance_uuid=instance_uuid) try: fixed_ips = objects.FixedIPList.get_by_instance_uuid( context, instance_uuid) except exception.FixedIpNotFoundForInstance: fixed_ips = [] LOG.debug('Found %d fixed IPs associated to the instance in the ' 'database.', len(fixed_ips), instance_uuid=instance_uuid) nw_info = network_model.NetworkInfo() vifs = collections.OrderedDict() for fixed_ip in fixed_ips: vif = fixed_ip.virtual_interface if not vif: LOG.warning(_LW('No VirtualInterface for FixedIP: %s'), str(fixed_ip.address), instance_uuid=instance_uuid) continue if not fixed_ip.network: LOG.warning(_LW('No Network for FixedIP: %s'), str(fixed_ip.address), instance_uuid=instance_uuid) continue if vif.uuid in vifs: current = vifs[vif.uuid] else: current = { 'id': vif.uuid, 'type': network_model.VIF_TYPE_BRIDGE, 'address': vif.address, } vifs[vif.uuid] = current net_dict = self._get_network_dict(fixed_ip.network) network = network_model.Network(**net_dict) subnets = self._get_subnets_from_network(context, fixed_ip.network, host) network['subnets'] = subnets current['network'] = network try: current['rxtx_cap'] = (fixed_ip.network['rxtx_base'] * rxtx_factor) except (TypeError, KeyError): pass if fixed_ip.network.cidr_v6 and vif.address: # NOTE(vish): I strongly suspect the v6 subnet is not used # anywhere, but support it just in case # add the v6 address to the v6 subnet address = ipv6.to_global(fixed_ip.network.cidr_v6, vif.address, fixed_ip.network.project_id) model_ip = network_model.FixedIP(address=address) current['network']['subnets'][1]['ips'].append(model_ip) # add the v4 address to the v4 subnet model_ip = network_model.FixedIP(address=str(fixed_ip.address)) for ip in fixed_ip.floating_ips: floating_ip = network_model.IP(address=str(ip['address']), type='floating') model_ip.add_floating_ip(floating_ip) current['network']['subnets'][0]['ips'].append(model_ip) for vif in vifs.values(): nw_info.append(network_model.VIF(**vif)) LOG.debug('Built network info: |%s|', nw_info, instance_uuid=instance_uuid) return nw_info @staticmethod def _get_network_dict(network): """Returns the dict representing necessary and meta network fields.""" # get generic network fields network_dict = {'id': network['uuid'], 'bridge': network['bridge'], 'label': network['label'], 'tenant_id': network['project_id']} # get extra information if network.get('injected'): network_dict['injected'] = network['injected'] return network_dict @staticmethod def _extract_subnets(network): """Returns information about the IPv4 and IPv6 subnets associated with a Neutron Network UUID. """ subnet_v4 = { 'network_id': network.uuid, 'cidr': network.cidr, 'gateway': network.gateway, 'dhcp_server': getattr(network, 'dhcp_server'), 'broadcast': network.broadcast, 'netmask': network.netmask, 'version': 4, 'dns1': network.dns1, 'dns2': network.dns2} # TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4. 
# this is probably bad as there is no way to add v6 # dns to nova subnet_v6 = { 'network_id': network.uuid, 'cidr': network.cidr_v6, 'gateway': network.gateway_v6, 'dhcp_server': None, 'broadcast': None, 'netmask': network.netmask_v6, 'version': 6, 'dns1': None, 'dns2': None} def ips_to_strs(net): for key, value in net.items(): if isinstance(value, netaddr.ip.BaseIP): net[key] = str(value) return net return [ips_to_strs(subnet_v4), ips_to_strs(subnet_v6)] def _get_subnets_from_network(self, context, network, instance_host=None): """Returns the 1 or 2 possible subnets for a nova network.""" extracted_subnets = self._extract_subnets(network) subnets = [] for subnet in extracted_subnets: subnet_dict = {'cidr': subnet['cidr'], 'gateway': network_model.IP( address=subnet['gateway'], type='gateway')} # deal with dhcp if self.DHCP: if network.get('multi_host'): dhcp_server = self._get_dhcp_ip(context, network, instance_host) else: dhcp_server = self._get_dhcp_ip(context, subnet) subnet_dict['dhcp_server'] = dhcp_server subnet_object = network_model.Subnet(**subnet_dict) # add dns info for k in ['dns1', 'dns2']: if subnet.get(k): subnet_object.add_dns( network_model.IP(address=subnet[k], type='dns')) subnet_object['ips'] = [] subnets.append(subnet_object) return subnets def _allocate_mac_addresses(self, context, instance_uuid, networks, macs): """Generates mac addresses and creates vif rows in db for them.""" # make a copy we can mutate if macs is not None: available_macs = set(macs) for network in networks: if macs is None: self._add_virtual_interface(context, instance_uuid, network['id']) else: try: mac = available_macs.pop() except KeyError: raise exception.VirtualInterfaceCreateException() self._add_virtual_interface(context, instance_uuid, network['id'], mac) def _add_virtual_interface(self, context, instance_uuid, network_id, mac=None): attempts = 1 if mac else CONF.create_unique_mac_address_attempts for i in range(attempts): try: vif = objects.VirtualInterface(context) vif.address = mac or utils.generate_mac_address() vif.instance_uuid = instance_uuid vif.network_id = network_id vif.uuid = str(uuid.uuid4()) vif.create() return vif except exception.VirtualInterfaceCreateException: # Try again up to max number of attempts pass raise exception.VirtualInterfaceMacAddressException() def add_fixed_ip_to_instance(self, context, instance_id, host, network_id, rxtx_factor=None): """Adds a fixed IP to an instance from specified network.""" if uuidutils.is_uuid_like(network_id): network = self.get_network(context, network_id) else: network = self._get_network_by_id(context, network_id) LOG.debug('Add fixed IP on network %s', network['uuid'], instance_uuid=instance_id) self._allocate_fixed_ips(context, instance_id, host, [network]) return self.get_instance_nw_info(context, instance_id, rxtx_factor, host) # NOTE(russellb) This method can be removed in 2.0 of this API. It is # deprecated in favor of the method in the base API. 
def get_backdoor_port(self, context): """Return backdoor port for eventlet_backdoor.""" return self.backdoor_port def remove_fixed_ip_from_instance(self, context, instance_id, host, address, rxtx_factor=None): """Removes a fixed IP from an instance from specified network.""" LOG.debug('Remove fixed IP %s', address, instance_uuid=instance_id) fixed_ips = objects.FixedIPList.get_by_instance_uuid(context, instance_id) for fixed_ip in fixed_ips: if str(fixed_ip.address) == address: self.deallocate_fixed_ip(context, address, host) # NOTE(vish): this probably isn't a dhcp ip so just # deallocate it now. In the extremely rare # case that this is a race condition, we # will just get a warn in lease or release. if not fixed_ip.leased: fixed_ip.disassociate() return self.get_instance_nw_info(context, instance_id, rxtx_factor, host) raise exception.FixedIpNotFoundForSpecificInstance( instance_uuid=instance_id, ip=address) def _validate_instance_zone_for_dns_domain(self, context, instance): if not self.instance_dns_domain: return True instance_domain = self.instance_dns_domain domainref = objects.DNSDomain.get_by_domain(context, instance_domain) if domainref is None: LOG.warning(_LW('instance-dns-zone not found |%s|.'), instance_domain, instance=instance) return True dns_zone = domainref.availability_zone instance_zone = instance.get('availability_zone') if dns_zone and (dns_zone != instance_zone): LOG.warning(_LW('instance-dns-zone is |%(domain)s|, ' 'which is in availability zone |%(zone)s|. ' 'Instance is in zone |%(zone2)s|. ' 'No DNS record will be created.'), {'domain': instance_domain, 'zone': dns_zone, 'zone2': instance_zone}, instance=instance) return False else: return True def allocate_fixed_ip(self, context, instance_id, network, **kwargs): """Gets a fixed IP from the pool.""" # TODO(vish): when this is called by compute, we can associate compute # with a network, or a cluster of computes with a network # and use that network here with a method like # network_get_by_compute_host address = None # NOTE(vish) This db query could be removed if we pass az and name # (or the whole instance object). instance = objects.Instance.get_by_uuid(context, instance_id) LOG.debug('Allocate fixed IP on network %s', network['uuid'], instance=instance) # A list of cleanup functions to call on error cleanup = [] # Check the quota; can't put this in the API because we get # called into from other places quotas = self.quotas_cls(context=context) quota_project, quota_user = quotas_obj.ids_from_instance(context, instance) try: quotas.reserve(fixed_ips=1, project_id=quota_project, user_id=quota_user) cleanup.append(functools.partial(quotas.rollback, context)) except exception.OverQuota as exc: usages = exc.kwargs['usages'] used = (usages['fixed_ips']['in_use'] + usages['fixed_ips']['reserved']) LOG.warning(_LW("Quota exceeded for project %(pid)s, tried to " "allocate fixed IP. %(used)s of %(allowed)s are " "in use or are already reserved."), {'pid': quota_project, 'used': used, 'allowed': exc.kwargs['quotas']['fixed_ips']}, instance_uuid=instance_id) raise exception.FixedIpLimitExceeded() try: if network['cidr']: # NOTE(mriedem): allocate the vif before associating the # instance to reduce a race window where a previous instance # was associated with the fixed IP and has released it, because # release_fixed_ip will disassociate if allocated is False. 
vif = objects.VirtualInterface.get_by_instance_and_network( context, instance_id, network['id']) if vif is None: LOG.debug('vif for network %(network)s is used up, ' 'trying to create new vif', {'network': network['id']}, instance=instance) vif = self._add_virtual_interface(context, instance_id, network['id']) address = kwargs.get('address', None) if address: LOG.debug('Associating instance with specified fixed IP ' '%(address)s in network %(network)s on subnet ' '%(cidr)s.' % {'address': address, 'network': network['id'], 'cidr': network['cidr']}, instance=instance) fip = objects.FixedIP.associate( context, str(address), instance_id, network['id'], vif_id=vif.id) else: LOG.debug('Associating instance with fixed IP from pool ' 'in network %(network)s on subnet %(cidr)s.' % {'network': network['id'], 'cidr': network['cidr']}, instance=instance) fip = objects.FixedIP.associate_pool( context.elevated(), network['id'], instance_id, vif_id=vif.id) LOG.debug('Associated instance with fixed IP: %s', fip, instance=instance) address = str(fip.address) cleanup.append(functools.partial(fip.disassociate, context)) LOG.debug('Refreshing security group members for instance.', instance=instance) self._do_trigger_security_group_members_refresh_for_instance( instance_id) cleanup.append(functools.partial( self._do_trigger_security_group_members_refresh_for_instance, # noqa instance_id)) name = instance.display_name if self._validate_instance_zone_for_dns_domain(context, instance): self.instance_dns_manager.create_entry( name, str(fip.address), "A", self.instance_dns_domain) cleanup.append(functools.partial( self.instance_dns_manager.delete_entry, name, self.instance_dns_domain)) self.instance_dns_manager.create_entry( instance_id, str(fip.address), "A", self.instance_dns_domain) cleanup.append(functools.partial( self.instance_dns_manager.delete_entry, instance_id, self.instance_dns_domain)) LOG.debug('Setting up network %(network)s on host %(host)s.' % {'network': network['id'], 'host': self.host}, instance=instance) self._setup_network_on_host(context, network) cleanup.append(functools.partial( self._teardown_network_on_host, context, network)) quotas.commit() if address is None: # TODO(mriedem): should _setup_network_on_host return the addr? LOG.debug('Fixed IP is setup on network %s but not returning ' 'the specific IP from the base network manager.', network['uuid'], instance=instance) else: LOG.debug('Allocated fixed IP %s on network %s', address, network['uuid'], instance=instance) return address except Exception: with excutils.save_and_reraise_exception(): for f in cleanup: try: f() except Exception: LOG.warning(_LW('Error cleaning up fixed IP ' 'allocation. Manual cleanup may ' 'be required.'), exc_info=True) def deallocate_fixed_ip(self, context, address, host=None, teardown=True, instance=None): """Returns a fixed IP to the pool.""" fixed_ip_ref = objects.FixedIP.get_by_address( context, address, expected_attrs=['network']) instance_uuid = fixed_ip_ref.instance_uuid vif_id = fixed_ip_ref.virtual_interface_id LOG.debug('Deallocate fixed IP %s', address, instance_uuid=instance_uuid) if not instance: # NOTE(vish) This db query could be removed if we pass az and name # (or the whole instance object). 
# NOTE(danms) We can't use fixed_ip_ref.instance because # instance may be deleted and the relationship # doesn't extend to deleted instances instance = objects.Instance.get_by_uuid( context.elevated(read_deleted='yes'), instance_uuid) quotas = self.quotas_cls(context=context) quota_project, quota_user = quotas_obj.ids_from_instance(context, instance) try: quotas.reserve(fixed_ips=-1, project_id=quota_project, user_id=quota_user) except Exception: LOG.exception(_LE("Failed to update usages deallocating " "fixed IP")) try: self._do_trigger_security_group_members_refresh_for_instance( instance_uuid) if self._validate_instance_zone_for_dns_domain(context, instance): for n in self.instance_dns_manager.get_entries_by_address( address, self.instance_dns_domain): self.instance_dns_manager.delete_entry(n, self.instance_dns_domain) fixed_ip_ref.allocated = False fixed_ip_ref.save() if teardown: network = fixed_ip_ref.network if CONF.force_dhcp_release: dev = self.driver.get_dev(network) # NOTE(vish): The below errors should never happen, but # there may be a race condition that is causing # them per # https://code.launchpad.net/bugs/968457, # so we log a message to help track down # the possible race. if not vif_id: LOG.info(_LI("Unable to release %s because vif " "doesn't exist"), address) return vif = objects.VirtualInterface.get_by_id(context, vif_id) if not vif: LOG.info(_LI("Unable to release %s because vif " "object doesn't exist"), address) return # NOTE(cfb): Call teardown before release_dhcp to ensure # that the IP can't be re-leased after a release # packet is sent. self._teardown_network_on_host(context, network) # NOTE(vish): This forces a packet so that the # release_fixed_ip callback will # get called by nova-dhcpbridge. try: self.driver.release_dhcp(dev, address, vif.address) except exception.NetworkDhcpReleaseFailed: LOG.error(_LE("Error releasing DHCP for IP %(address)s" " with MAC %(mac_address)s"), {'address': address, 'mac_address': vif.address}, instance=instance) # NOTE(yufang521247): This is probably a failed dhcp fixed # ip. DHCPRELEASE packet sent to dnsmasq would not trigger # dhcp-bridge to run. Thus it is better to disassociate # such fixed ip here. 
fixed_ip_ref = objects.FixedIP.get_by_address( context, address) if (instance_uuid == fixed_ip_ref.instance_uuid and not fixed_ip_ref.leased): LOG.debug('Explicitly disassociating fixed IP %s from ' 'instance.', address, instance_uuid=instance_uuid) fixed_ip_ref.disassociate() else: # We can't try to free the IP address so just call teardown self._teardown_network_on_host(context, network) except Exception: with excutils.save_and_reraise_exception(): try: quotas.rollback() except Exception: LOG.warning(_LW("Failed to rollback quota for " "deallocate fixed IP: %s"), address, instance=instance) # Commit the reservations quotas.commit() def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when IP is leased.""" LOG.debug('Leased IP |%s|', address, context=context) fixed_ip = objects.FixedIP.get_by_address(context, address) if fixed_ip.instance_uuid is None: LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip, context=context) return fixed_ip.leased = True fixed_ip.save() if not fixed_ip.allocated: LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), fixed_ip, context=context, instance_uuid=fixed_ip.instance_uuid) def release_fixed_ip(self, context, address, mac=None): """Called by dhcp-bridge when IP is released.""" LOG.debug('Released IP |%s|', address, context=context) fixed_ip = objects.FixedIP.get_by_address(context, address) if fixed_ip.instance_uuid is None: LOG.warning(_LW('IP %s released that is not associated'), fixed_ip, context=context) return if not fixed_ip.leased: LOG.warning(_LW('IP %s released that was not leased'), fixed_ip, context=context, instance_uuid=fixed_ip.instance_uuid) else: fixed_ip.leased = False fixed_ip.save() if not fixed_ip.allocated: # NOTE(mriedem): Sometimes allocate_fixed_ip will associate the # fixed IP to a new instance while an old associated instance is # being deallocated. So we check to see if the mac is for the VIF # that is associated to the instance that is currently associated # with the fixed IP because if it's not, we hit this race and # should ignore the request so we don't disassociate the fixed IP # from the wrong instance. 
if mac: LOG.debug('Checking to see if virtual interface with MAC ' '%(mac)s is still associated to instance.', {'mac': mac}, instance_uuid=fixed_ip.instance_uuid) vif = objects.VirtualInterface.get_by_address(context, mac) if vif: LOG.debug('Found VIF: %s', vif, instance_uuid=fixed_ip.instance_uuid) if vif.instance_uuid != fixed_ip.instance_uuid: LOG.info(_LI("Ignoring request to release fixed IP " "%(address)s with MAC %(mac)s since it " "is now associated with a new instance " "that is in the process of allocating " "it's network."), {'address': address, 'mac': mac}, instance_uuid=fixed_ip.instance_uuid) return else: LOG.debug('No VIF was found for MAC: %s', mac, instance_uuid=fixed_ip.instance_uuid) LOG.debug('Disassociating fixed IP %s from instance.', address, instance_uuid=fixed_ip.instance_uuid) fixed_ip.disassociate() @staticmethod def _convert_int_args(kwargs): int_args = ("network_size", "num_networks", "vlan_start", "vpn_start") for key in int_args: try: value = kwargs.get(key) if value is None: continue kwargs[key] = int(value) except ValueError: raise exception.InvalidIntValue(key=key) def create_networks(self, context, label, cidr=None, multi_host=None, num_networks=None, network_size=None, cidr_v6=None, gateway=None, gateway_v6=None, bridge=None, bridge_interface=None, dns1=None, dns2=None, fixed_cidr=None, allowed_start=None, allowed_end=None, **kwargs): arg_names = ("label", "cidr", "multi_host", "num_networks", "network_size", "cidr_v6", "gateway", "gateway_v6", "bridge", "bridge_interface", "dns1", "dns2", "fixed_cidr", "allowed_start", "allowed_end") if 'mtu' not in kwargs: kwargs['mtu'] = CONF.network_device_mtu if 'dhcp_server' not in kwargs: kwargs['dhcp_server'] = gateway if 'enable_dhcp' not in kwargs: kwargs['enable_dhcp'] = True if 'share_address' not in kwargs: kwargs['share_address'] = CONF.share_dhcp_address for name in arg_names: kwargs[name] = locals()[name] self._convert_int_args(kwargs) # check for certain required inputs # NOTE: We can remove this check after v2.0 API code is removed because # jsonschema has checked already before this. label = kwargs["label"] if not label: raise exception.NetworkNotCreated(req="label") # Size of "label" column in nova.networks is 255, hence the restriction # NOTE: We can remove this check after v2.0 API code is removed because # jsonschema has checked already before this. if len(label) > 255: raise exception.LabelTooLong() # NOTE: We can remove this check after v2.0 API code is removed because # jsonschema has checked already before this. 
if not (kwargs["cidr"] or kwargs["cidr_v6"]): raise exception.NetworkNotCreated(req="cidr or cidr_v6") kwargs["bridge"] = kwargs["bridge"] or CONF.flat_network_bridge kwargs["bridge_interface"] = (kwargs["bridge_interface"] or CONF.flat_interface) for fld in self.required_create_args: if not kwargs[fld]: raise exception.NetworkNotCreated(req=fld) if kwargs["cidr_v6"]: # NOTE(vish): just for validation try: netaddr.IPNetwork(kwargs["cidr_v6"]) except netaddr.AddrFormatError: raise exception.InvalidCidr(cidr=kwargs["cidr_v6"]) if kwargs["cidr"]: try: fixnet = netaddr.IPNetwork(kwargs["cidr"]) except netaddr.AddrFormatError: raise exception.InvalidCidr(cidr=kwargs["cidr"]) kwargs["num_networks"] = kwargs["num_networks"] or CONF.num_networks if not kwargs["network_size"]: if kwargs["cidr"]: each_subnet_size = fixnet.size / kwargs["num_networks"] if each_subnet_size > CONF.network_size: subnet = 32 - int(math.log(CONF.network_size, 2)) oversize_msg = _LW( 'Subnet(s) too large, defaulting to /%s.' ' To override, specify network_size flag.') % subnet LOG.warning(oversize_msg) kwargs["network_size"] = CONF.network_size else: kwargs["network_size"] = fixnet.size else: kwargs["network_size"] = CONF.network_size kwargs["multi_host"] = ( CONF.multi_host if kwargs["multi_host"] is None else strutils.bool_from_string(kwargs["multi_host"])) kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start kwargs["vpn_start"] = kwargs.get("vpn_start") or CONF.vpn_start kwargs["dns1"] = kwargs["dns1"] or CONF.flat_network_dns if kwargs["fixed_cidr"]: try: kwargs["fixed_cidr"] = netaddr.IPNetwork(kwargs["fixed_cidr"]) except netaddr.AddrFormatError: raise exception.InvalidCidr(cidr=kwargs["fixed_cidr"]) # Subnet of fixed IPs must fall within fixed range if kwargs["fixed_cidr"] not in fixnet: raise exception.AddressOutOfRange( address=kwargs["fixed_cidr"].network, cidr=fixnet) LOG.debug('Create network: |%s|', kwargs) return self._do_create_networks(context, **kwargs) @staticmethod def _index_of(subnet, ip): try: start = netaddr.IPAddress(ip) except netaddr.AddrFormatError: raise exception.InvalidAddress(address=ip) index = start.value - subnet.value if index < 0 or index >= subnet.size: raise exception.AddressOutOfRange(address=ip, cidr=str(subnet)) return index def _validate_cidr(self, context, nets, subnets_v4, fixed_net_v4): used_subnets = [net.cidr for net in nets] def find_next(subnet): next_subnet = subnet.next() while next_subnet in subnets_v4: next_subnet = next_subnet.next() if next_subnet in fixed_net_v4: return next_subnet for subnet in list(subnets_v4): if subnet in used_subnets: next_subnet = find_next(subnet) if next_subnet: subnets_v4.remove(subnet) subnets_v4.append(next_subnet) subnet = next_subnet else: raise exception.CidrConflict(cidr=subnet, other=subnet) for used_subnet in used_subnets: if subnet in used_subnet: raise exception.CidrConflict(cidr=subnet, other=used_subnet) if used_subnet in subnet: next_subnet = find_next(subnet) if next_subnet: subnets_v4.remove(subnet) subnets_v4.append(next_subnet) subnet = next_subnet else: raise exception.CidrConflict(cidr=subnet, other=used_subnet) def _do_create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, fixed_cidr=None, mtu=None, dhcp_server=None, enable_dhcp=None, share_address=None, allowed_start=None, allowed_end=None, **kwargs): """Create networks based on parameters.""" # NOTE(jkoelker): these are dummy values to make sure iter 
works # TODO(tr3buchet): disallow carving up networks fixed_net_v4 = netaddr.IPNetwork('0/32') fixed_net_v6 = netaddr.IPNetwork('::0/128') subnets_v4 = [] subnets_v6 = [] if kwargs.get('ipam'): if cidr_v6: subnets_v6 = [netaddr.IPNetwork(cidr_v6)] if cidr: subnets_v4 = [netaddr.IPNetwork(cidr)] else: subnet_bits = int(math.ceil(math.log(network_size, 2))) if cidr_v6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) prefixlen_v6 = 128 - subnet_bits # smallest subnet in IPv6 ethernet network is /64 if prefixlen_v6 > 64: prefixlen_v6 = 64 subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks) if cidr: fixed_net_v4 = netaddr.IPNetwork(cidr) prefixlen_v4 = 32 - subnet_bits subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4, count=num_networks)) if cidr: # NOTE(jkoelker): This replaces the _validate_cidrs call and # prevents looping multiple times try: nets = objects.NetworkList.get_all(context) except exception.NoNetworksFound: nets = [] num_used_nets = len(nets) self._validate_cidr(context, nets, subnets_v4, fixed_net_v4) networks = objects.NetworkList(context=context, objects=[]) subnets = six.moves.zip_longest(subnets_v4, subnets_v6) for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = objects.Network(context=context) uuid = kwargs.get('uuid') if uuid: net.uuid = uuid net.bridge = bridge net.bridge_interface = bridge_interface net.multi_host = multi_host net.dns1 = dns1 net.dns2 = dns2 net.mtu = mtu net.enable_dhcp = enable_dhcp net.share_address = share_address net.project_id = kwargs.get('project_id') if num_networks > 1: net.label = '%s_%d' % (label, index) else: net.label = label bottom_reserved = self._bottom_reserved_ips top_reserved = self._top_reserved_ips extra_reserved = [] if cidr and subnet_v4: current = subnet_v4[1] if allowed_start: val = self._index_of(subnet_v4, allowed_start) current = netaddr.IPAddress(allowed_start) bottom_reserved = val if allowed_end: val = self._index_of(subnet_v4, allowed_end) top_reserved = subnet_v4.size - 1 - val net.cidr = str(subnet_v4) net.netmask = str(subnet_v4.netmask) net.broadcast = str(subnet_v4.broadcast) if gateway: net.gateway = gateway else: net.gateway = current current += 1 net.dhcp_server = dhcp_server or net.gateway net.dhcp_start = current current += 1 if net.dhcp_start == net.dhcp_server: net.dhcp_start = current extra_reserved.append(str(net.dhcp_server)) extra_reserved.append(str(net.gateway)) if cidr_v6 and subnet_v6: net.cidr_v6 = str(subnet_v6) if gateway_v6: # use a pre-defined gateway if one is provided net.gateway_v6 = str(gateway_v6) else: net.gateway_v6 = str(subnet_v6[1]) net.netmask_v6 = str(subnet_v6.netmask) if CONF.network_manager == 'nova.network.manager.VlanManager': vlan = kwargs.get('vlan', None) if not vlan: index_vlan = index + num_used_nets vlan = kwargs['vlan_start'] + index_vlan used_vlans = [x.vlan for x in nets] if vlan in used_vlans: # That vlan is used, try to get another one used_vlans.sort() vlan = used_vlans[-1] + 1 net.vpn_private_address = net.dhcp_start extra_reserved.append(str(net.vpn_private_address)) net.dhcp_start = net.dhcp_start + 1 net.vlan = vlan net.bridge = 'br%s' % vlan # NOTE(vish): This makes ports unique across the cloud, a more # robust solution would be to make them uniq per ip index_vpn = index + num_used_nets net.vpn_public_port = kwargs['vpn_start'] + index_vpn net.create() networks.objects.append(net) if cidr and subnet_v4: self._create_fixed_ips(context, net.id, fixed_cidr, extra_reserved, bottom_reserved, top_reserved) # NOTE(danms): Remove this in RPC API 
v2.0 return obj_base.obj_to_primitive(networks) def delete_network(self, context, fixed_range, uuid, require_disassociated=True): # Prefer uuid but we'll also take cidr for backwards compatibility elevated = context.elevated() if uuid: network = objects.Network.get_by_uuid(elevated, uuid) elif fixed_range: network = objects.Network.get_by_cidr(elevated, fixed_range) LOG.debug('Delete network %s', network['uuid']) if require_disassociated and network.project_id is not None: raise exception.NetworkHasProject(project_id=network.project_id) network.destroy() @property def _bottom_reserved_ips(self): """Number of reserved IPs at the bottom of the range.""" return 2 # network, gateway @property def _top_reserved_ips(self): """Number of reserved IPs at the top of the range.""" return 1 # broadcast def _create_fixed_ips(self, context, network_id, fixed_cidr=None, extra_reserved=None, bottom_reserved=0, top_reserved=0): """Create all fixed IPs for network.""" network = self._get_network_by_id(context, network_id) if extra_reserved is None: extra_reserved = [] if not fixed_cidr: fixed_cidr = netaddr.IPNetwork(network['cidr']) num_ips = len(fixed_cidr) ips = [] for index in range(num_ips): address = str(fixed_cidr[index]) if (index < bottom_reserved or num_ips - index <= top_reserved or address in extra_reserved): reserved = True else: reserved = False ips.append({'network_id': network_id, 'address': address, 'reserved': reserved}) objects.FixedIPList.bulk_create(context, ips) def _allocate_fixed_ips(self, context, instance_id, host, networks, **kwargs): """Calls allocate_fixed_ip once for each network.""" raise NotImplementedError() def setup_networks_on_host(self, context, instance_id, host, instance=None, teardown=False): """calls setup/teardown on network hosts for an instance.""" green_threads = [] if teardown: call_func = self._teardown_network_on_host else: call_func = self._setup_network_on_host if instance is None: instance = objects.Instance.get_by_id(context, instance_id) vifs = objects.VirtualInterfaceList.get_by_instance_uuid( context, instance.uuid) LOG.debug('Setup networks on host', instance=instance) for vif in vifs: network = objects.Network.get_by_id(context, vif.network_id) if not network.multi_host: # NOTE (tr3buchet): if using multi_host, host is instance.host host = network['host'] if self.host == host or host is None: # at this point i am the correct host, or host doesn't # matter -> FlatManager call_func(context, network) else: # i'm not the right host, run call on correct host green_threads.append(utils.spawn( self.network_rpcapi.rpc_setup_network_on_host, context, network.id, teardown, host)) # wait for all of the setups (if any) to finish for gt in green_threads: gt.wait() def rpc_setup_network_on_host(self, context, network_id, teardown): if teardown: call_func = self._teardown_network_on_host else: call_func = self._setup_network_on_host # subcall from original setup_networks_on_host network = objects.Network.get_by_id(context, network_id) call_func(context, network) def _initialize_network(self, network): if network.enable_dhcp: is_ext = (network.dhcp_server is not None and network.dhcp_server != network.gateway) self.l3driver.initialize_network(network.cidr, is_ext) self.l3driver.initialize_gateway(network) def _setup_network_on_host(self, context, network): """Sets up network on this host.""" raise NotImplementedError() def _teardown_network_on_host(self, context, network): """Sets up network on this host.""" raise NotImplementedError() def validate_networks(self, 
context, networks): """check if the networks exists and host is set to each network. """ LOG.debug('Validate networks') if networks is None or len(networks) == 0: return for network_uuid, address in networks: # check if the fixed IP address is valid and # it actually belongs to the network if address is not None: if not netutils.is_valid_ip(address): raise exception.FixedIpInvalid(address=address) fixed_ip_ref = objects.FixedIP.get_by_address( context, address, expected_attrs=['network']) network = fixed_ip_ref.network if network.uuid != network_uuid: raise exception.FixedIpNotFoundForNetwork( address=address, network_uuid=network_uuid) if fixed_ip_ref.instance_uuid is not None: raise exception.FixedIpAlreadyInUse( address=address, instance_uuid=fixed_ip_ref.instance_uuid) def _get_network_by_id(self, context, network_id): return objects.Network.get_by_id(context, network_id, project_only='allow_none') def _get_networks_by_uuids(self, context, network_uuids): networks = objects.NetworkList.get_by_uuids( context, network_uuids, project_only="allow_none") networks.sort(key=lambda x: network_uuids.index(x.uuid)) return networks def get_vifs_by_instance(self, context, instance_id): """Returns the vifs associated with an instance.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. instance = objects.Instance.get_by_id(context, instance_id) LOG.debug('Get VIFs for instance', instance=instance) # NOTE(russellb) No need to object-ify this since # get_vifs_by_instance() is unused and set to be removed. vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, instance.uuid) for vif in vifs: if vif.network_id is not None: network = self._get_network_by_id(context, vif.network_id) vif.net_uuid = network.uuid return [dict(vif) for vif in vifs] def get_instance_id_by_floating_address(self, context, address): """Returns the instance id a floating IP's fixed IP is allocated to.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. LOG.debug('Get instance for floating address %s', address) fixed_ip = objects.FixedIP.get_by_floating_address(context, address) if fixed_ip is None: return None else: return fixed_ip.instance_uuid def get_network(self, context, network_uuid): # NOTE(vish): used locally return objects.Network.get_by_uuid(context.elevated(), network_uuid) def get_all_networks(self, context): # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. try: return obj_base.obj_to_primitive( objects.NetworkList.get_all(context)) except exception.NoNetworksFound: return [] def disassociate_network(self, context, network_uuid): # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. network = self.get_network(context, network_uuid) network.disassociate(context, network.id) def get_fixed_ip(self, context, id): """Return a fixed IP.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. return objects.FixedIP.get_by_id(context, id) def get_fixed_ip_by_address(self, context, address): # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. 
return objects.FixedIP.get_by_address(context, address) def get_vif_by_mac_address(self, context, mac_address): """Returns the vifs record for the mac_address.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. # NOTE(russellb) No need to object-ify this since # get_vifs_by_instance() is unused and set to be removed. vif = objects.VirtualInterface.get_by_address(context, mac_address) if vif.network_id is not None: network = self._get_network_by_id(context, vif.network_id) vif.net_uuid = network.uuid return vif @periodic_task.periodic_task( spacing=CONF.dns_update_periodic_interval) def _periodic_update_dns(self, context): """Update local DNS entries of all networks on this host.""" networks = objects.NetworkList.get_by_host(context, self.host) for network in networks: dev = self.driver.get_dev(network) self.driver.update_dns(context, dev, network) def update_dns(self, context, network_ids): """Called when fixed IP is allocated or deallocated.""" if CONF.fake_network: return LOG.debug('Update DNS for network ids: %s', network_ids) networks = [network for network in objects.NetworkList.get_by_host(context, self.host) if network.multi_host and network.id in network_ids] for network in networks: dev = self.driver.get_dev(network) self.driver.update_dns(context, dev, network) def add_network_to_project(self, ctxt, project_id, network_uuid): raise NotImplementedError() class FlatManager(NetworkManager): """Basic network where no vlans are used. FlatManager does not do any bridge or vlan creation. The user is responsible for setting up whatever bridges are specified when creating networks through nova-manage. This bridge needs to be created on all compute hosts. The idea is to create a single network for the host with a command like: nova-manage network create 192.168.0.0/24 1 256. Creating multiple networks for one manager is currently not supported, but could be added by modifying allocate_fixed_ip and get_network to get the network with new logic. Arbitrary lists of addresses in a single network can be accomplished with manual db editing. If flat_injected is True, the compute host will attempt to inject network config into the guest. It attempts to modify /etc/network/interfaces and currently only works on debian based systems. To support a wider range of OSes, some other method may need to be devised to let the guest know which IP it should be using so that it can configure itself. Perhaps an attached disk or serial device with configuration info. Metadata forwarding must be handled by the gateway, and since nova does not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. 
""" timeout_fixed_ips = False required_create_args = ['bridge'] def _allocate_fixed_ips(self, context, instance_id, host, networks, **kwargs): """Calls allocate_fixed_ip once for each network.""" requested_networks = kwargs.get('requested_networks') addresses_by_network = {} if requested_networks is not None: for request in requested_networks: addresses_by_network[request.network_id] = request.address for network in networks: if network['uuid'] in addresses_by_network: address = addresses_by_network[network['uuid']] else: address = None self.allocate_fixed_ip(context, instance_id, network, address=address) def deallocate_fixed_ip(self, context, address, host=None, teardown=True, instance=None): """Returns a fixed IP to the pool.""" super(FlatManager, self).deallocate_fixed_ip(context, address, host, teardown, instance=instance) objects.FixedIP.disassociate_by_address(context, address) def _setup_network_on_host(self, context, network): """Setup Network on this host.""" # NOTE(tr3buchet): this does not need to happen on every ip # allocation, this functionality makes more sense in create_network # but we'd have to move the flat_injected flag to compute network.injected = CONF.flat_injected network.save() def _teardown_network_on_host(self, context, network): """Tear down network on this host.""" pass # NOTE(justinsb): The floating ip functions are stub-implemented. # We were throwing an exception, but this was messing up horizon. # Timing makes it difficult to implement floating ips here, in Essex. def get_floating_ip(self, context, id): """Returns a floating IP as a dict.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. return None def get_floating_pools(self, context): """Returns list of floating pools.""" # NOTE(maurosr) This method should be removed in future, replaced by # get_floating_ip_pools. See bug #1091668 return {} def get_floating_ip_pools(self, context): """Returns list of floating IP pools.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. return {} def get_floating_ip_by_address(self, context, address): """Returns a floating IP as a dict.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. return None def get_floating_ips_by_project(self, context): """Returns the floating IPs allocated to a project.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. return [] def get_floating_ips_by_fixed_address(self, context, fixed_address): """Returns the floating IPs associated with a fixed_address.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. return [] # NOTE(hanlind): This method can be removed in version 2.0 of the RPC API def allocate_floating_ip(self, context, project_id, pool): """Gets a floating IP from the pool.""" return None # NOTE(hanlind): This method can be removed in version 2.0 of the RPC API def deallocate_floating_ip(self, context, address, affect_auto_assigned): """Returns a floating IP to the pool.""" return None # NOTE(hanlind): This method can be removed in version 2.0 of the RPC API def associate_floating_ip(self, context, floating_address, fixed_address, affect_auto_assigned=False): """Associates a floating IP with a fixed IP. Makes sure everything makes sense then calls _associate_floating_ip, rpc'ing to correct host if i'm not it. 
""" return None # NOTE(hanlind): This method can be removed in version 2.0 of the RPC API def disassociate_floating_ip(self, context, address, affect_auto_assigned=False): """Disassociates a floating IP from its fixed IP. Makes sure everything makes sense then calls _disassociate_floating_ip, rpc'ing to correct host if i'm not it. """ return None def migrate_instance_start(self, context, instance_uuid, floating_addresses, rxtx_factor=None, project_id=None, source=None, dest=None): pass def migrate_instance_finish(self, context, instance_uuid, floating_addresses, host=None, rxtx_factor=None, project_id=None, source=None, dest=None): pass def update_dns(self, context, network_ids): """Called when fixed IP is allocated or deallocated.""" pass class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager): """Flat networking with dhcp. FlatDHCPManager will start up one dhcp server to give out addresses. It never injects network settings into the guest. It also manages bridges. Otherwise it behaves like FlatManager. """ SHOULD_CREATE_BRIDGE = True DHCP = True required_create_args = ['bridge'] def init_host(self): """Do any initialization that needs to be run if this is a standalone service. """ ctxt = context.get_admin_context() networks = objects.NetworkList.get_by_host(ctxt, self.host) self.driver.iptables_manager.defer_apply_on() self.l3driver.initialize(fixed_range=False, networks=networks) super(FlatDHCPManager, self).init_host() self.init_host_floating_ips() self.driver.iptables_manager.defer_apply_off() def _setup_network_on_host(self, context, network): """Sets up network on this host.""" network.dhcp_server = self._get_dhcp_ip(context, network) self._initialize_network(network) # NOTE(vish): if dhcp server is not set then don't dhcp if not CONF.fake_network and network.enable_dhcp: dev = self.driver.get_dev(network) # NOTE(dprince): dhcp DB queries require elevated context elevated = context.elevated() self.driver.update_dhcp(elevated, dev, network) if CONF.use_ipv6: self.driver.update_ra(context, dev, network) gateway = utils.get_my_linklocal(dev) network.gateway_v6 = gateway network.save() def _teardown_network_on_host(self, context, network): # NOTE(vish): if dhcp server is not set then don't dhcp if not CONF.fake_network and network.enable_dhcp: network['dhcp_server'] = self._get_dhcp_ip(context, network) dev = self.driver.get_dev(network) # NOTE(dprince): dhcp DB queries require elevated context elevated = context.elevated() self.driver.update_dhcp(elevated, dev, network) def _get_network_dict(self, network): """Returns the dict representing necessary and meta network fields.""" # get generic network fields network_dict = super(FlatDHCPManager, self)._get_network_dict(network) # get flat dhcp specific fields if self.SHOULD_CREATE_BRIDGE: network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE if network.get('bridge_interface'): network_dict['bridge_interface'] = network['bridge_interface'] if network.get('multi_host'): network_dict['multi_host'] = network['multi_host'] return network_dict class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager): """Vlan network with dhcp. VlanManager is the most complicated. It will create a host-managed vlan for each project. Each project gets its own subnet. The networks and associated subnets are created with nova-manage using a command like: nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks of 16 addresses from the beginning of the 10.0.0.0 range. 
A dhcp server is run for each subnet, so each project will have its own. For this mode to be useful, each project will need a vpn to access the instances in its subnet. """ SHOULD_CREATE_BRIDGE = True SHOULD_CREATE_VLAN = True DHCP = True required_create_args = ['bridge_interface'] def __init__(self, network_driver=None, *args, **kwargs): super(VlanManager, self).__init__(network_driver=network_driver, *args, **kwargs) # NOTE(cfb) VlanManager doesn't enforce quotas on fixed IP addresses # because a project is assigned an entire network. self.quotas_cls = objects.QuotasNoOp def init_host(self): """Do any initialization that needs to be run if this is a standalone service. """ LOG.debug('Setup network on host %s', self.host) ctxt = context.get_admin_context() networks = objects.NetworkList.get_by_host(ctxt, self.host) self.driver.iptables_manager.defer_apply_on() self.l3driver.initialize(fixed_range=False, networks=networks) NetworkManager.init_host(self) self.init_host_floating_ips() self.driver.iptables_manager.defer_apply_off() def allocate_fixed_ip(self, context, instance_id, network, **kwargs): """Gets a fixed IP from the pool.""" LOG.debug('Allocate fixed IP on network %s', network['uuid'], instance_uuid=instance_id) # NOTE(mriedem): allocate the vif before associating the # instance to reduce a race window where a previous instance # was associated with the fixed IP and has released it, because # release_fixed_ip will disassociate if allocated is False. vif = objects.VirtualInterface.get_by_instance_and_network( context, instance_id, network['id']) if vif is None: LOG.debug('vif for network %(network)s and instance ' '%(instance_id)s is used up, ' 'trying to create new vif', {'network': network['id'], 'instance_id': instance_id}) vif = self._add_virtual_interface(context, instance_id, network['id']) if kwargs.get('vpn', None): address = network['vpn_private_address'] fip = objects.FixedIP.associate(context, str(address), instance_id, network['id'], reserved=True, vif_id=vif.id) else: address = kwargs.get('address', None) if address: fip = objects.FixedIP.associate(context, str(address), instance_id, network['id'], vif_id=vif.id) else: fip = objects.FixedIP.associate_pool( context, network['id'], instance_id, vif_id=vif.id) address = fip.address if not kwargs.get('vpn', None): self._do_trigger_security_group_members_refresh_for_instance( instance_id) # NOTE(vish) This db query could be removed if we pass az and name # (or the whole instance object). 
instance = objects.Instance.get_by_uuid(context, instance_id) name = instance.display_name if self._validate_instance_zone_for_dns_domain(context, instance): self.instance_dns_manager.create_entry(name, address, "A", self.instance_dns_domain) self.instance_dns_manager.create_entry(instance_id, address, "A", self.instance_dns_domain) self._setup_network_on_host(context, network) LOG.debug('Allocated fixed IP %s on network %s', address, network['uuid'], instance=instance) return address def add_network_to_project(self, context, project_id, network_uuid=None): """Force adds another network to a project.""" LOG.debug('Add network %s to project %s', network_uuid, project_id) if network_uuid is not None: network_id = self.get_network(context, network_uuid).id else: network_id = None objects.Network.associate(context, project_id, network_id, force=True) def associate(self, context, network_uuid, associations): """Associate or disassociate host or project to network.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi to 2.0. LOG.debug('Associate network %s: |%s|', network_uuid, associations) network = self.get_network(context, network_uuid) network_id = network.id if 'host' in associations: host = associations['host'] if host is None: network.disassociate(context, network_id, host=True, project=False) else: network.host = self.host network.save() if 'project' in associations: project = associations['project'] if project is None: network.disassociate(context, network_id, host=False, project=True) else: network.associate(context, project, network_id, force=True) def _get_network_by_id(self, context, network_id): # NOTE(vish): Don't allow access to networks with project_id=None as # these are networks that haven't been allocated to a # project yet. return objects.Network.get_by_id(context, network_id, project_only=True) def _get_networks_by_uuids(self, context, network_uuids): # NOTE(vish): Don't allow access to networks with project_id=None as # these are networks that haven't been allocated to a # project yet. networks = objects.NetworkList.get_by_uuids( context, network_uuids, project_only=True) networks.sort(key=lambda x: network_uuids.index(x.uuid)) return networks def _get_networks_for_instance(self, context, instance_id, project_id, requested_networks=None): """Determine which networks an instance should connect to.""" # get networks associated with project if requested_networks is not None and len(requested_networks) != 0: network_uuids = [request.network_id for request in requested_networks] networks = self._get_networks_by_uuids(context, network_uuids) else: # NOTE(vish): Allocates network on demand so requires admin. 
networks = objects.NetworkList.get_by_project( context.elevated(), project_id) return networks def create_networks(self, context, **kwargs): """Create networks based on parameters.""" self._convert_int_args(kwargs) kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start kwargs["num_networks"] = (kwargs.get("num_networks") or CONF.num_networks) kwargs["network_size"] = (kwargs.get("network_size") or CONF.network_size) # Check that num_networks + vlan_start is not > 4094, fixes lp708025 if kwargs["num_networks"] + kwargs["vlan_start"] > 4094: raise ValueError(_('The sum between the number of networks and' ' the vlan start cannot be greater' ' than 4094')) # Check that vlan is not greater than 4094 or less then 1 vlan_num = kwargs.get("vlan", None) if vlan_num is not None: try: vlan_num = int(vlan_num) except ValueError: raise ValueError(_("vlan must be an integer")) if vlan_num > 4094: raise ValueError(_('The vlan number cannot be greater than' ' 4094')) if vlan_num < 1: raise ValueError(_('The vlan number cannot be less than 1')) # check that num networks and network size fits in fixed_net fixed_net = netaddr.IPNetwork(kwargs['cidr']) if fixed_net.size < kwargs['num_networks'] * kwargs['network_size']: raise ValueError(_('The network range is not ' 'big enough to fit %(num_networks)s networks. Network ' 'size is %(network_size)s') % kwargs) kwargs['bridge_interface'] = (kwargs.get('bridge_interface') or CONF.vlan_interface) LOG.debug('Create network: |%s|', kwargs) return NetworkManager.create_networks( self, context, vpn=True, **kwargs) @utils.synchronized('setup_network', external=True) def _setup_network_on_host(self, context, network): """Sets up network on this host.""" if not network.vpn_public_address: address = CONF.vpn_ip network.vpn_public_address = address network.save() else: address = network.vpn_public_address network.dhcp_server = self._get_dhcp_ip(context, network) self._initialize_network(network) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. if address == CONF.vpn_ip and hasattr(self.driver, "ensure_vpn_forward"): self.l3driver.add_vpn(CONF.vpn_ip, network.vpn_public_port, network.vpn_private_address) if not CONF.fake_network: dev = self.driver.get_dev(network) # NOTE(dprince): dhcp DB queries require elevated context if network.enable_dhcp: elevated = context.elevated() self.driver.update_dhcp(elevated, dev, network) if CONF.use_ipv6: self.driver.update_ra(context, dev, network) gateway = utils.get_my_linklocal(dev) network.gateway_v6 = gateway network.save() @utils.synchronized('setup_network', external=True) def _teardown_network_on_host(self, context, network): if not CONF.fake_network: network['dhcp_server'] = self._get_dhcp_ip(context, network) dev = self.driver.get_dev(network) # NOTE(ethuleau): For multi hosted networks, if the network is no # more used on this host and if VPN forwarding rule aren't handed # by the host, we delete the network gateway. 
vpn_address = network['vpn_public_address'] if (CONF.teardown_unused_network_gateway and network['multi_host'] and vpn_address != CONF.vpn_ip and not objects.Network.in_use_on_host(context, network['id'], self.host)): LOG.debug("Remove unused gateway %s", network['bridge']) if network.enable_dhcp: self.driver.kill_dhcp(dev) self.l3driver.remove_gateway(network) if not self._uses_shared_ip(network): fip = objects.FixedIP.get_by_address(context, network.dhcp_server) fip.allocated = False fip.host = None fip.save() # NOTE(vish): if dhcp server is not set then don't dhcp elif network.enable_dhcp: # NOTE(dprince): dhcp DB queries require elevated context elevated = context.elevated() self.driver.update_dhcp(elevated, dev, network) def _get_network_dict(self, network): """Returns the dict representing necessary and meta network fields.""" # get generic network fields network_dict = super(VlanManager, self)._get_network_dict(network) # get vlan specific network fields if self.SHOULD_CREATE_BRIDGE: network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE if self.SHOULD_CREATE_VLAN: network_dict['should_create_vlan'] = self.SHOULD_CREATE_VLAN for k in ['vlan', 'bridge_interface', 'multi_host']: if network.get(k): network_dict[k] = network[k] return network_dict @property def _bottom_reserved_ips(self): """Number of reserved IPs at the bottom of the range.""" return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server @property def _top_reserved_ips(self): """Number of reserved IPs at the top of the range.""" parent_reserved = super(VlanManager, self)._top_reserved_ips return parent_reserved + CONF.cnt_vpn_clients
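# ---------------------------------------------------------------------------
# Illustrative sketch only -- NOT part of the upstream nova module. A minimal,
# self-contained rendering of the reserved-IP bookkeeping that
# NetworkManager._create_fixed_ips() and the VlanManager reserved-IP
# properties above implement, so the bottom/top reservation logic can be read
# in isolation. Only the netaddr dependency is assumed; every name below is
# local to this sketch and chosen for illustration.
# ---------------------------------------------------------------------------
import netaddr


def _sketch_reserved_fixed_ips(cidr, bottom_reserved=3, top_reserved=1,
                               extra_reserved=()):
    """Yield (address, reserved) pairs the way _create_fixed_ips flags them.

    bottom_reserved=3 mirrors VlanManager (network, gateway, vpn server);
    top_reserved=1 covers the broadcast address, plus whatever
    cnt_vpn_clients a deployment reserves on top of that.
    """
    subnet = netaddr.IPNetwork(cidr)
    num_ips = len(subnet)
    for index in range(num_ips):
        address = str(subnet[index])
        # Same predicate as _create_fixed_ips: reserve the bottom of the
        # range, the top of the range, and any explicitly listed addresses.
        reserved = (index < bottom_reserved or
                    num_ips - index <= top_reserved or
                    address in extra_reserved)
        yield address, reserved


# Example usage of the sketch: a /29 (8 addresses) with the VlanManager-style
# defaults leaves 8 - 3 - 1 = 4 allocatable fixed IPs.
#   free = [a for a, r in _sketch_reserved_fixed_ips('10.0.0.0/29') if not r]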
the-stack_0_8220
""" PipelineWise CLI - Pipelinewise class """ import logging import os import shutil import signal import sys import json import copy import psutil import pidfile from datetime import datetime from time import time from typing import Dict, Optional, List from joblib import Parallel, delayed, parallel_backend from tabulate import tabulate from . import utils from . import commands from .commands import TapParams, TargetParams, TransformParams from .config import Config from .alert_sender import AlertSender from .alert_handlers.base_alert_handler import BaseAlertHandler # pylint: disable=too-many-lines,too-many-instance-attributes,too-many-public-methods class PipelineWise: """PipelineWise main Class""" INCREMENTAL = 'INCREMENTAL' LOG_BASED = 'LOG_BASED' FULL_TABLE = 'FULL_TABLE' STATUS_SUCCESS = 'SUCCESS' STATUS_FAILED = 'FAILED' TRANSFORM_FIELD_CONNECTOR_NAME = 'transform-field' def __init__(self, args, config_dir, venv_dir, profiling_dir=None): self.profiling_mode = args.profiler self.profiling_dir = profiling_dir self.drop_pg_slot = False self.args = args self.logger = logging.getLogger(__name__) self.config_dir = config_dir self.venv_dir = venv_dir self.extra_log = args.extra_log self.pipelinewise_bin = os.path.join(self.venv_dir, 'cli', 'bin', 'pipelinewise') self.config_path = os.path.join(self.config_dir, 'config.json') self.load_config() self.alert_sender = AlertSender(self.config.get('alert_handlers')) if args.tap != '*': self.tap = self.get_tap(args.target, args.tap) self.tap_bin = self.get_connector_bin(self.tap['type']) self.tap_python_bin = self.get_connector_python_bin(self.tap['type']) if args.target != '*': self.target = self.get_target(args.target) self.target_bin = self.get_connector_bin(self.target['type']) self.target_python_bin = self.get_connector_python_bin(self.target['type']) self.transform_field_bin = self.get_connector_bin(self.TRANSFORM_FIELD_CONNECTOR_NAME) self.transform_field_python_bin = self.get_connector_python_bin(self.TRANSFORM_FIELD_CONNECTOR_NAME) self.tap_run_log_file = None # Catch SIGINT and SIGTERM to exit gracefully for sig in [signal.SIGINT, signal.SIGTERM]: signal.signal(sig, self._exit_gracefully) def send_alert(self, message: str, level: str = BaseAlertHandler.ERROR, exc: Exception = None) -> dict: """ Send alert messages to every alert handler if sender is not disabled for the tap Args: message: the alert message level: alert level exc: optional exception that triggered the alert Returns: Dictionary with number of successfully sent alerts """ stats = {'sent': 0} send_alert = self.tap.get('send_alert', True) if send_alert: stats = self.alert_sender.send_to_all_handlers(message=message, level=level, exc=exc) return stats def create_consumable_target_config(self, target_config, tap_inheritable_config): """ Create consumable target config by appending "inheritable" config to the common target config """ dict_a, dict_b = {}, {} try: dict_a = utils.load_json(target_config) dict_b = utils.load_json(tap_inheritable_config) # Copy everything from dictB into dictA - Not a real merge dict_a.update(dict_b) # Save the new dict as JSON into a temp file tempfile_path = utils.create_temp_file(dir=self.get_temp_dir(), prefix='target_config_', suffix='.json')[1] utils.save_json(dict_a, tempfile_path) return tempfile_path except Exception as exc: raise Exception(f'Cannot merge JSON files {dict_a} {dict_b} - {exc}') from exc # pylint: disable=too-many-statements,too-many-branches,too-many-nested-blocks,too-many-locals,too-many-arguments def 
create_filtered_tap_properties(self, target_type, tap_type, tap_properties, tap_state, filters, create_fallback=False): """ Create a filtered version of tap properties file based on specific filter conditions. Return values: 1) A temporary JSON file where only those tables are selected to sync which meet the filter criteria 2) List of tap_stream_ids where filter criteria matched 3) OPTIONAL when create_fallback is True: Temporary JSON file with table that don't meet the filter criteria 4) OPTIONAL when create_fallback is True: List of tap_stream_ids where filter criteria don't match """ # Get filter conditions with default values from input dictionary # Nothing selected by default f_selected = filters.get('selected', None) f_target_type = filters.get('target_type', None) f_tap_type = filters.get('tap_type', None) f_replication_method = filters.get('replication_method', None) f_initial_sync_required = filters.get('initial_sync_required', None) # Lists of tables that meet and don't meet the filter criteria filtered_tap_stream_ids = [] fallback_filtered_stream_ids = [] self.logger.debug('Filtering properties JSON by conditions: %s', filters) try: # Load JSON files properties = utils.load_json(tap_properties) state = utils.load_json(tap_state) # Create a dictionary for tables that don't meet filter criteria fallback_properties = copy.deepcopy(properties) if create_fallback else {} # Foreach stream (table) in the original properties for stream_idx, stream in enumerate(properties.get('streams', tap_properties)): initial_sync_required = False # Collect required properties from the properties file tap_stream_id = stream.get('tap_stream_id') table_name = stream.get('table_name') metadata = stream.get('metadata', []) # Collect further properties from the properties file under the metadata key table_meta = {} meta_idx = 0 for meta_idx, meta in enumerate(metadata): if isinstance(meta, dict) and len(meta.get('breadcrumb', [])) == 0: table_meta = meta.get('metadata') break # Can we make sure that the stream has the right metadata? # To be safe, check if no right metadata has been found, then throw an exception. if not table_meta: self.logger.error('Stream %s has no metadata with no breadcrumbs: %s.', tap_stream_id, metadata) raise Exception(f'Missing metadata in stream {tap_stream_id}') selected = table_meta.get('selected', False) replication_method = table_meta.get('replication-method', None) # Detect if initial sync is required. Look into the state file, get the bookmark # for the current stream (table) and if valid bookmark doesn't exist then # initial sync is required bookmarks = state.get('bookmarks', {}) if isinstance(state, dict) else {} new_stream = False # if stream not in bookmarks, then it's a new table if tap_stream_id not in bookmarks: new_stream = True initial_sync_required = True else: stream_bookmark = bookmarks[tap_stream_id] if self._is_initial_sync_required(replication_method, stream_bookmark): initial_sync_required = True # Compare actual values to the filter conditions. 
# Set the "selected" key to True if actual values meet the filter criteria # Set the "selected" key to False if the actual values don't meet the filter criteria # pylint: disable=too-many-boolean-expressions if ( (f_selected is None or selected == f_selected) and (f_target_type is None or target_type in f_target_type) and (f_tap_type is None or tap_type in f_tap_type) and (f_replication_method is None or replication_method in f_replication_method) and (f_initial_sync_required is None or initial_sync_required == f_initial_sync_required) ): self.logger.debug("""Filter condition(s) matched: Table : %s Tap Stream ID : %s Selected : %s Replication Method : %s Init Sync Required : %s """, table_name, tap_stream_id, selected, replication_method, initial_sync_required) # Filter condition matched: mark table as selected to sync properties['streams'][stream_idx]['metadata'][meta_idx]['metadata']['selected'] = True filtered_tap_stream_ids.append(tap_stream_id) # Filter condition matched: # if the stream is a new table and is a singer stream, then mark it as selected to sync in the # the fallback properties as well if the table is selected in the original properties. # Otherwise, mark it as not selected if create_fallback: if new_stream and replication_method in [self.INCREMENTAL, self.LOG_BASED]: fallback_properties['streams'][stream_idx]['metadata'][meta_idx]['metadata'][ 'selected'] = True if selected: fallback_filtered_stream_ids.append(tap_stream_id) else: fallback_properties['streams'][stream_idx]['metadata'][meta_idx]['metadata'][ 'selected'] = False else: # Filter condition didn't match: mark table as not selected to sync properties['streams'][stream_idx]['metadata'][meta_idx]['metadata']['selected'] = False # Filter condition didn't match: mark table as selected to sync in the fallback properties # Fallback only if the table is selected in the original properties if create_fallback and selected is True: fallback_properties['streams'][stream_idx]['metadata'][meta_idx]['metadata']['selected'] = True fallback_filtered_stream_ids.append(tap_stream_id) # Save the generated properties file(s) and return # Fallback required: Save filtered and fallback properties JSON if create_fallback: # Save to files: filtered and fallback properties temp_properties_path = utils.create_temp_file(dir=self.get_temp_dir(), prefix='properties_', suffix='.json')[1] utils.save_json(properties, temp_properties_path) temp_fallback_properties_path = utils.create_temp_file(dir=self.get_temp_dir(), prefix='properties_', suffix='.json')[1] utils.save_json(fallback_properties, temp_fallback_properties_path) return temp_properties_path, \ filtered_tap_stream_ids, \ temp_fallback_properties_path, \ fallback_filtered_stream_ids # Fallback not required: Save only the filtered properties JSON temp_properties_path = utils.create_temp_file(dir=self.get_temp_dir(), prefix='properties_', suffix='.json')[1] utils.save_json(properties, temp_properties_path) return temp_properties_path, filtered_tap_stream_ids except Exception as exc: raise Exception(f'Cannot create JSON file - {exc}') from exc def load_config(self): """ Load configuration """ self.logger.debug('Loading config at %s', self.config_path) config = utils.load_json(self.config_path) if config: self.config = config else: self.config = {} def get_temp_dir(self): """ Returns the tap specific temp directory """ return os.path.join(self.config_dir, 'tmp') def get_tap_dir(self, target_id, tap_id): """ Get absolute path of a tap directory """ return os.path.join(self.config_dir, 
target_id, tap_id) def get_tap_log_dir(self, target_id, tap_id): """ Get absolute path of a tap log directory """ return os.path.join(self.get_tap_dir(target_id, tap_id), 'log') def get_target_dir(self, target_id): """ Get absolute path of a target directory """ return os.path.join(self.config_dir, target_id) def get_connector_bin(self, connector_type): """ Get absolute path of a connector executable """ return os.path.join(self.venv_dir, connector_type, 'bin', connector_type) def get_connector_python_bin(self, connector_type): """ Get absolute path of a connector python command """ return os.path.join(self.venv_dir, connector_type, 'bin', 'python') @classmethod def get_connector_files(cls, connector_dir): """ Get connector file paths """ return { 'config': os.path.join(connector_dir, 'config.json'), 'inheritable_config': os.path.join(connector_dir, 'inheritable_config.json'), 'properties': os.path.join(connector_dir, 'properties.json'), 'state': os.path.join(connector_dir, 'state.json'), 'transformation': os.path.join(connector_dir, 'transformation.json'), 'selection': os.path.join(connector_dir, 'selection.json'), 'pidfile': os.path.join(connector_dir, 'pipelinewise.pid') } def get_targets(self): """ Get every target """ self.logger.debug('Getting targets from %s', self.config_path) self.load_config() try: targets = self.config.get('targets', []) except Exception as exc: raise Exception('Targets not defined') from exc return targets def get_target(self, target_id: str) -> Dict: """ Get target by id """ self.logger.debug('Getting %s target', target_id) targets = self.get_targets() target = next((item for item in targets if item['id'] == target_id), False) if not target: raise Exception(f'Cannot find {target_id} target') target_dir = self.get_target_dir(target_id) if os.path.isdir(target_dir): target['files'] = self.get_connector_files(target_dir) else: raise Exception(f'Cannot find target at {target_dir}') return target def get_taps(self, target_id): """ Get every tap from a specific target """ self.logger.debug('Getting taps from %s target', target_id) target = self.get_target(target_id) try: taps = target['taps'] # Add tap status for tap_idx, tap in enumerate(taps): taps[tap_idx]['status'] = self.detect_tap_status(target_id, tap['id']) except Exception as exc: raise Exception(f'No taps defined for {target_id} target') from exc return taps def get_tap(self, target_id, tap_id): """ Get tap by id from a specific target """ self.logger.debug('Getting %s tap from target %s', tap_id, target_id) taps = self.get_taps(target_id) tap = next((item for item in taps if item['id'] == tap_id), False) if not tap: raise Exception(f'Cannot find {tap_id} tap in {target_id} target') tap_dir = self.get_tap_dir(target_id, tap_id) if os.path.isdir(tap_dir): tap['files'] = self.get_connector_files(tap_dir) else: raise Exception(f'Cannot find tap at {tap_dir}') # Add target and status details tap['target'] = self.get_target(target_id) tap['status'] = self.detect_tap_status(target_id, tap_id) return tap # pylint: disable=too-many-branches,too-many-statements,too-many-nested-blocks,too-many-locals def merge_schemas(self, old_schema, new_schema): """ Merge two schemas """ schema_with_diff = new_schema if not old_schema: schema_with_diff = new_schema else: new_streams = new_schema['streams'] old_streams = old_schema['streams'] for new_stream_idx, new_stream in enumerate(new_streams): new_tap_stream_id = new_stream['tap_stream_id'] old_stream = next((item for item in old_streams if item['tap_stream_id'] == 
new_tap_stream_id), False) # Is this a new stream? if not old_stream: new_schema['streams'][new_stream_idx]['is-new'] = True # Copy stream selection from the old properties else: # Find table specific metadata entries in the old and new streams new_stream_table_mdata_idx = 0 old_stream_table_mdata_idx = 0 try: new_stream_table_mdata_idx = \ [i for i, md in enumerate(new_stream['metadata']) if md['breadcrumb'] == []][0] old_stream_table_mdata_idx = \ [i for i, md in enumerate(old_stream['metadata']) if md['breadcrumb'] == []][0] except Exception: pass # Copy is-new flag from the old stream try: new_schema['streams'][new_stream_idx]['is-new'] = old_stream['is-new'] except Exception: pass # Copy selected from the old stream try: new_schema['streams'][new_stream_idx]['metadata'][new_stream_table_mdata_idx]['metadata'][ 'selected'] = old_stream['metadata'][old_stream_table_mdata_idx]['metadata']['selected'] except Exception: pass # Copy replication method from the old stream try: new_schema['streams'][new_stream_idx]['metadata'] \ [new_stream_table_mdata_idx]['metadata']['replication-method'] = \ old_stream['metadata'][old_stream_table_mdata_idx]['metadata']['replication-method'] except Exception: pass # Copy replication key from the old stream try: new_schema['streams'][new_stream_idx]['metadata'][new_stream_table_mdata_idx] \ ['metadata']['replication-key'] = \ old_stream['metadata'][old_stream_table_mdata_idx]['metadata'][ 'replication-key'] except Exception: pass # Is this new or modified field? new_fields = new_schema['streams'][new_stream_idx]['schema']['properties'] old_fields = old_stream['schema']['properties'] for new_field_key in new_fields: new_field = new_fields[new_field_key] new_field_mdata_idx = -1 # Find new field metadata index for i, mdata in enumerate(new_schema['streams'][new_stream_idx]['metadata']): if len(mdata['breadcrumb']) == 2 and mdata['breadcrumb'][0] == 'properties' and \ mdata['breadcrumb'][1] == new_field_key: new_field_mdata_idx = i # Field exists if new_field_key in old_fields.keys(): old_field = old_fields[new_field_key] old_field_mdata_idx = -1 # Find old field metadata index for i, mdata in enumerate(old_stream['metadata']): if len(mdata['breadcrumb']) == 2 and mdata['breadcrumb'][0] == 'properties' and \ mdata['breadcrumb'][1] == new_field_key: old_field_mdata_idx = i new_mdata = new_schema['streams'][new_stream_idx]['metadata'][new_field_mdata_idx][ 'metadata'] old_mdata = old_stream['metadata'][old_field_mdata_idx]['metadata'] # Copy is-new flag from the old properties try: new_mdata['is-new'] = old_mdata['is-new'] except Exception: pass # Copy is-modified flag from the old properties try: new_mdata['is-modified'] = old_mdata['is-modified'] except Exception: pass # Copy field selection from the old properties try: new_mdata['selected'] = old_mdata['selected'] except Exception: pass # Field exists and type is the same - Do nothing more in the schema if new_field == old_field: self.logger.debug('Field exists in %s stream with the same type: %s: %s', new_tap_stream_id, new_field_key, new_field) # Field exists but types are different - Mark the field as modified in the metadata else: self.logger.debug('Field exists in %s stream but types are different: %s: %s}', new_tap_stream_id, new_field_key, new_field) try: new_schema['streams'][new_stream_idx]['metadata'][new_field_mdata_idx]['metadata'][ 'is-modified'] = True new_schema['streams'][new_stream_idx]['metadata'][new_field_mdata_idx]['metadata'][ 'is-new'] = False except Exception: pass # New field - Mark 
the field as new in the metadata else: self.logger.debug('New field in stream %s: %s: %s', new_tap_stream_id, new_field_key, new_field) try: new_schema['streams'][new_stream_idx]['metadata'][new_field_mdata_idx]['metadata'][ 'is-new'] = True except Exception: pass schema_with_diff = new_schema return schema_with_diff def make_default_selection(self, schema, selection_file): """ Select the streams to sync in schema from a selection JSON file """ if os.path.isfile(selection_file): self.logger.debug('Loading pre defined selection from %s', selection_file) tap_selection = utils.load_json(selection_file) selection = tap_selection['selection'] streams = schema['streams'] for stream_idx, stream in enumerate(streams): tap_stream_id = stream.get('tap_stream_id') tap_stream_sel = False for sel in selection: if 'tap_stream_id' in sel and tap_stream_id.lower() == sel['tap_stream_id'].lower(): tap_stream_sel = sel # Find table specific metadata entries in the old and new streams try: stream_table_mdata_idx = [i for i, md in enumerate(stream['metadata']) if md['breadcrumb'] == []][0] except Exception as exc: raise Exception(f'Metadata of stream {tap_stream_id} doesn\'t have an empty breadcrumb') from exc if tap_stream_sel: self.logger.debug('Mark %s tap_stream_id as selected with properties %s', tap_stream_id, tap_stream_sel) schema['streams'][stream_idx]['metadata'][stream_table_mdata_idx]['metadata']['selected'] = True if 'replication_method' in tap_stream_sel: schema['streams'][stream_idx]['metadata'][stream_table_mdata_idx]['metadata'][ 'replication-method'] = tap_stream_sel['replication_method'] if 'replication_key' in tap_stream_sel: schema['streams'][stream_idx]['metadata'][stream_table_mdata_idx]['metadata'][ 'replication-key'] = tap_stream_sel['replication_key'] else: self.logger.debug('Mark %s tap_stream_id as not selected', tap_stream_id) schema['streams'][stream_idx]['metadata'][stream_table_mdata_idx]['metadata']['selected'] = False return schema def init(self): """ Initialise and create a sample project. The project will contain sample YAML configuration for every supported tap and target connects. """ self.logger.info('Initialising new project %s...', self.args.name) project_dir = os.path.join(os.getcwd(), self.args.name) # Create project dir if not exists if os.path.exists(project_dir): self.logger.error('Directory exists and cannot create new project: %s', self.args.name) sys.exit(1) else: os.mkdir(project_dir) for yaml in sorted(utils.get_sample_file_paths()): yaml_basename = os.path.basename(yaml) dst = os.path.join(project_dir, yaml_basename) self.logger.info('Creating %s...', yaml_basename) shutil.copyfile(yaml, dst) def test_tap_connection(self): """ Test the tap connection. It will connect to the data source that is defined in the tap and will return success if it’s available. 
""" tap_id = self.tap['id'] tap_type = self.tap['type'] target_id = self.target['id'] target_type = self.target['type'] self.logger.info('Testing %s (%s) tap connection in %s (%s) target', tap_id, tap_type, target_id, target_type) # Generate and run the command to run the tap directly # We will use the discover option to test connection tap_config = self.tap['files']['config'] command = f'{self.tap_bin} --config {tap_config} --discover' if self.profiling_mode: dump_file = os.path.join(self.profiling_dir, f'tap_{tap_id}.pstat') command = f'{self.tap_python_bin} -m cProfile -o {dump_file} {command}' result = commands.run_command(command) # Get output and errors from tap # pylint: disable=unused-variable returncode, new_schema, tap_output = result if returncode != 0: self.logger.error('Testing tap connection (%s - %s) FAILED', target_id, tap_id) sys.exit(1) # If the connection success then the response needs to be a valid JSON string if not utils.is_json(new_schema): self.logger.error('Schema discovered by %s (%s) is not a valid JSON.', tap_id, tap_type) sys.exit(1) else: self.logger.info('Testing tap connection (%s - %s) PASSED', target_id, tap_id) # pylint: disable=too-many-locals,inconsistent-return-statements def discover_tap(self, tap=None, target=None): """ Run a specific tap in discovery mode. Discovery mode is connecting to the data source and collecting information that is required for running the tap. """ if tap is None: tap = self.tap if target is None: target = self.target # Define tap props tap_id = tap.get('id') tap_type = tap.get('type') tap_config_file = tap.get('files', {}).get('config') tap_properties_file = tap.get('files', {}).get('properties') tap_selection_file = tap.get('files', {}).get('selection') tap_bin = self.get_connector_bin(tap_type) tap_python_bin = self.get_connector_python_bin(tap_type) # Define target props target_id = target.get('id') target_type = target.get('type') self.logger.info('Discovering %s (%s) tap in %s (%s) target...', tap_id, tap_type, target_id, target_type) # Generate and run the command to run the tap directly command = f'{tap_bin} --config {tap_config_file} --discover' if self.profiling_mode: dump_file = os.path.join(self.profiling_dir, f'tap_{tap_id}.pstat') command = f'{tap_python_bin} -m cProfile -o {dump_file} {command}' self.logger.debug('Discovery command: %s', command) result = commands.run_command(command) # Get output and errors from tap # pylint: disable=unused-variable returncode, new_schema, output = result if returncode != 0: return f'{target_id} - {tap_id}: {output}' # Convert JSON string to object try: new_schema = json.loads(new_schema) except Exception as exc: self.logger.exception(exc) return f'Schema discovered by {tap_id} ({tap_type}) is not a valid JSON.' # Merge the old and new schemas and diff changes old_schema = utils.load_json(tap_properties_file) if old_schema: schema_with_diff = self.merge_schemas(old_schema, new_schema) else: schema_with_diff = new_schema # Make selection from selection.json if exists try: schema_with_diff = self.make_default_selection(schema_with_diff, tap_selection_file) schema_with_diff = utils.delete_keys_from_dict( self.make_default_selection(schema_with_diff, tap_selection_file), # Removing multipleOf json schema validations from properties.json, # that's causing run time issues ['multipleOf']) except Exception as exc: return f'Cannot load selection JSON at {tap_selection_file}. 
{str(exc)}' # Post import checks post_import_errors = self._run_post_import_tap_checks(tap, schema_with_diff, target_id) if len(post_import_errors) > 0: return f'Post import tap checks failed in tap {tap_id}: {post_import_errors}' # Save the new catalog into the tap try: self.logger.info('Writing new properties file with changes into %s', tap_properties_file) utils.save_json(schema_with_diff, tap_properties_file) except Exception as exc: return f'Cannot save file. {str(exc)}' def detect_tap_status(self, target_id, tap_id): """ Detect status of a tap """ self.logger.debug('Detecting %s tap status in %s target', tap_id, target_id) tap_dir = self.get_tap_dir(target_id, tap_id) log_dir = self.get_tap_log_dir(target_id, tap_id) connector_files = self.get_connector_files(tap_dir) status = { 'currentStatus': 'unknown', 'lastStatus': 'unknown', 'lastTimestamp': None } # Tap exists but configuration not completed if not os.path.isfile(connector_files['config']): status['currentStatus'] = 'not-configured' # Tap exists and has log in running status elif os.path.isdir(log_dir) and len(utils.search_files(log_dir, patterns=['*.log.running'])) > 0: status['currentStatus'] = 'running' # Configured and not running else: status['currentStatus'] = 'ready' # Get last run instance if os.path.isdir(log_dir): log_files = utils.search_files(log_dir, patterns=['*.log.success', '*.log.failed'], sort=True) if len(log_files) > 0: last_log_file = log_files[0] log_attr = utils.extract_log_attributes(last_log_file) status['lastStatus'] = log_attr['status'] status['lastTimestamp'] = log_attr['timestamp'] return status def status(self): """ Prints a status summary table of every imported pipeline with their tap and target. """ targets = self.get_targets() tab_headers = [ 'Tap ID', 'Tap Type', 'Target ID', 'Target Type', 'Enabled', 'Status', 'Last Sync', 'Last Sync Result' ] tab_body = [] pipelines = 0 for target in targets: taps = self.get_taps(target['id']) for tap in taps: tab_body.append([ tap.get('id', '<Unknown>'), tap.get('type', '<Unknown>'), target.get('id', '<Unknown>'), target.get('type', '<Unknown>'), tap.get('enabled', '<Unknown>'), tap.get('status', {}).get('currentStatus', '<Unknown>'), tap.get('status', {}).get('lastTimestamp', '<Unknown>'), tap.get('status', {}).get('lastStatus', '<Unknown>') ]) pipelines += 1 print(tabulate(tab_body, headers=tab_headers, tablefmt='simple')) print(f'{pipelines} pipeline(s)') def run_tap_singer(self, tap: TapParams, target: TargetParams, transform: TransformParams, stream_buffer_size: int = 0) -> str: """ Generate and run piped shell command to sync tables using singer taps and targets """ # Build the piped executable command command = commands.build_singer_command(tap=tap, target=target, transform=transform, stream_buffer_size=stream_buffer_size, stream_buffer_log_file=self.tap_run_log_file, profiling_mode=self.profiling_mode, profiling_dir=self.profiling_dir) # Do not run if another instance is already running log_dir = os.path.dirname(self.tap_run_log_file) if os.path.isdir(log_dir) and len(utils.search_files(log_dir, patterns=['*.log.running'])) > 0: self.logger.info( 'Failed to run. Another instance of the same tap is already running. 
' 'Log file detected in running status at %s', log_dir) sys.exit(1) start = None state = None def update_state_file(line: str) -> str: # Update state variable with latest state if utils.is_state_message(line): # if it has been more than 2 seconds since we last updated the state file # update it again with newly received state nonlocal start, state if start is None or time() - start >= 2: with open(tap.state, 'w') as state_file: state_file.write(line) # Update start time to be the current time. start = time() # Keep track of state message so that we do one last file update at the end of the run_tap_singer # function. This is to avoid the edge case where the last state message and the one before it are # less than 2 sec apart. state = line return line # Singer tap is running in subprocess. # Collect the formatted logs and log it in the main PipelineWise process as well. # Logs are already formatted at this stage so not using logging functions to avoid double formatting. def update_state_file_with_extra_log(line: str) -> str: sys.stdout.write(line) return update_state_file(line) # Run command with update_state_file as a callback to call for every stdout line if self.extra_log: commands.run_command(command, self.tap_run_log_file, update_state_file_with_extra_log) else: commands.run_command(command, self.tap_run_log_file, update_state_file) # update the state file one last time to make sure it always has the last state message. if state is not None: with open(tap.state, 'w') as statefile: statefile.write(state) def run_tap_fastsync(self, tap: TapParams, target: TargetParams, transform: TransformParams): """ Generating and running shell command to sync tables using the native fastsync components """ # Build the fastsync executable command command = commands.build_fastsync_command(tap=tap, target=target, transform=transform, venv_dir=self.venv_dir, temp_dir=self.get_temp_dir(), tables=self.args.tables, profiling_mode=self.profiling_mode, profiling_dir=self.profiling_dir, drop_pg_slot=self.drop_pg_slot) # Do not run if another instance is already running log_dir = os.path.dirname(self.tap_run_log_file) if os.path.isdir(log_dir) and len(utils.search_files(log_dir, patterns=['*.log.running'])) > 0: self.logger.info( 'Failed to run. Another instance of the same tap is already running. ' 'Log file detected in running status at %s', log_dir) sys.exit(1) # Fastsync is running in subprocess. # Collect the formatted logs and log it in the main PipelineWise process as well # Logs are already formatted at this stage so not using logging functions to avoid double formatting. def add_fastsync_output_to_main_logger(line: str) -> str: sys.stdout.write(line) return line if self.extra_log: # Run command and copy fastsync output to main logger commands.run_command(command, self.tap_run_log_file, add_fastsync_output_to_main_logger) else: # Run command commands.run_command(command, self.tap_run_log_file) # pylint: disable=too-many-statements,too-many-locals def run_tap(self): """ Generating command(s) to run tap to sync data from source to target The generated commands can use one or multiple commands of: 1. Fastsync: Native and optimised component to sync table from a specific type of tap into a specific type of target. This command will be used automatically when FULL_TABLE replication method selected or when initial sync is required. 2. Singer Taps and Targets: Dynamic components following the singer specification to sync tables from multiple sources to multiple targets. 
This command will be used automatically when INCREMENTAL and LOG_BASED replication method selected. FULL_TABLE replication are not using the singer components because they are too slow to sync large tables. """ tap_id = self.tap['id'] tap_type = self.tap['type'] target_id = self.target['id'] target_type = self.target['type'] stream_buffer_size = self.tap.get('stream_buffer_size', commands.DEFAULT_STREAM_BUFFER_SIZE) self.logger.info('Running %s tap in %s target', tap_id, target_id) # Run only if tap enabled if not self.tap.get('enabled', False): self.logger.info('Tap %s is not enabled.', self.tap['name']) sys.exit(1) # Run only if not running tap_status = self.detect_tap_status(target_id, tap_id) if tap_status['currentStatus'] == 'running': self.logger.info('Tap %s is currently running.', self.tap['name']) sys.exit(1) # Generate and run the command to run the tap directly tap_config = self.tap['files']['config'] tap_inheritable_config = self.tap['files']['inheritable_config'] tap_properties = self.tap['files']['properties'] tap_state = self.tap['files']['state'] tap_transformation = self.tap['files']['transformation'] target_config = self.target['files']['config'] # Some target attributes can be passed and override by tap (aka. inheritable config) # We merge the two configs and use that with the target cons_target_config = self.create_consumable_target_config(target_config, tap_inheritable_config) # Output will be redirected into target and tap specific log directory log_dir = self.get_tap_log_dir(target_id, tap_id) current_time = datetime.utcnow().strftime('%Y%m%d_%H%M%S') # Create fastsync and singer specific filtered tap properties that contains only # the the tables that needs to be synced by the specific command ( tap_properties_fastsync, fastsync_stream_ids, tap_properties_singer, singer_stream_ids ) = self.create_filtered_tap_properties( target_type, tap_type, tap_properties, tap_state, { 'selected': True, 'target_type': ['target-snowflake', 'target-redshift', 'target-postgres'], 'tap_type': ['tap-mysql', 'tap-postgres', 'tap-s3-csv', 'tap-mongodb'], 'initial_sync_required': True }, create_fallback=True) start_time = datetime.now() try: with pidfile.PIDFile(self.tap['files']['pidfile']): target_params = TargetParams(id=target_id, type=target_type, bin=self.target_bin, python_bin=self.target_python_bin, config=cons_target_config) transform_params = TransformParams(bin=self.transform_field_bin, python_bin=self.transform_field_python_bin, config=tap_transformation, tap_id=tap_id, target_id=target_id) # Run fastsync for FULL_TABLE replication method if len(fastsync_stream_ids) > 0: self.logger.info('Table(s) selected to sync by fastsync: %s', fastsync_stream_ids) self.tap_run_log_file = os.path.join(log_dir, f'{target_id}-{tap_id}-{current_time}.fastsync.log') tap_params = TapParams(id=tap_id, type=tap_type, bin=self.tap_bin, python_bin=self.tap_python_bin, config=tap_config, properties=tap_properties_fastsync, state=tap_state) self.run_tap_fastsync(tap=tap_params, target=target_params, transform=transform_params) else: self.logger.info('No table available that needs to be sync by fastsync') # Run singer tap for INCREMENTAL and LOG_BASED replication methods if len(singer_stream_ids) > 0: self.logger.info('Table(s) selected to sync by singer: %s', singer_stream_ids) self.tap_run_log_file = os.path.join(log_dir, f'{target_id}-{tap_id}-{current_time}.singer.log') tap_params = TapParams(id=tap_id, type=tap_type, bin=self.tap_bin, python_bin=self.tap_python_bin, config=tap_config, 
properties=tap_properties_singer, state=tap_state) self.run_tap_singer(tap=tap_params, target=target_params, transform=transform_params, stream_buffer_size=stream_buffer_size) else: self.logger.info('No table available that needs to be sync by singer') except pidfile.AlreadyRunningError: self.logger.error('Another instance of the tap is already running.') utils.silentremove(cons_target_config) utils.silentremove(tap_properties_fastsync) utils.silentremove(tap_properties_singer) sys.exit(1) # Delete temp files if there is any except commands.RunCommandException as exc: self.logger.exception(exc) utils.silentremove(cons_target_config) utils.silentremove(tap_properties_fastsync) utils.silentremove(tap_properties_singer) self._print_tap_run_summary(self.STATUS_FAILED, start_time, datetime.now()) self.send_alert(message=f'{tap_id} tap failed', exc=exc) sys.exit(1) except Exception as exc: utils.silentremove(cons_target_config) utils.silentremove(tap_properties_fastsync) utils.silentremove(tap_properties_singer) self._print_tap_run_summary(self.STATUS_FAILED, start_time, datetime.now()) self.send_alert(message=f'{tap_id} tap failed', exc=exc) raise exc utils.silentremove(cons_target_config) utils.silentremove(tap_properties_fastsync) utils.silentremove(tap_properties_singer) self._print_tap_run_summary(self.STATUS_SUCCESS, start_time, datetime.now()) def stop_tap(self): """ Stop running tap The command finds the tap specific pidfile that was created by run_tap command and sends a SIGINT to the process. The SIGINT signal triggers _exit_gracefully function automatically and the tap stops running. """ pidfile_path = self.tap['files']['pidfile'] try: with open(pidfile_path) as pidf: pid = int(pidf.read()) parent = psutil.Process(pid) # Terminate child processes for child in parent.children(recursive=True): self.logger.info('Sending SIGINT to child pid %s...', child.pid) child.send_signal(signal.SIGINT) # Terminate main process self.logger.info('Sending SIGINT to main pid %s...', parent.pid) parent.send_signal(signal.SIGINT) except ProcessLookupError: self.logger.error('Pid %s not found. Is the tap running on this machine? ' 'Stopping taps remotely is not supported.', pid) sys.exit(1) except FileNotFoundError: self.logger.error('No pidfile found at %s. Tap does not seem to be running.', pidfile_path) sys.exit(1) # pylint: disable=too-many-locals def sync_tables(self): """ Sync every or a list of selected tables from a specific tap. It performs an initial sync and resets the table bookmarks to their new location. The function is using the fastsync components hence it's only available for taps and targets where the native and optimised fastsync component is implemented. """ tap_id = self.tap['id'] tap_type = self.tap['type'] target_id = self.target['id'] target_type = self.target['type'] fastsync_bin = utils.get_fastsync_bin(self.venv_dir, tap_type, target_type) self.logger.info('Syncing tables from %s (%s) to %s (%s)...', tap_id, tap_type, target_id, target_type) # Run only if tap enabled if not self.tap.get('enabled', False): self.logger.info('Tap %s is not enabled.', self.tap['name']) sys.exit(1) # Run only if tap not running tap_status = self.detect_tap_status(target_id, tap_id) if tap_status['currentStatus'] == 'running': self.logger.info('Tap %s is currently running and cannot sync. 
Stop the tap and try again.', self.tap['name']) sys.exit(1) # Tap exists but configuration not completed if not os.path.isfile(fastsync_bin): self.logger.error('Table sync function is not implemented from %s datasources to %s type of targets', tap_type, target_type) sys.exit(1) # Generate and run the command to run the tap directly tap_config = self.tap['files']['config'] tap_inheritable_config = self.tap['files']['inheritable_config'] tap_properties = self.tap['files']['properties'] tap_state = self.tap['files']['state'] tap_transformation = self.tap['files']['transformation'] target_config = self.target['files']['config'] # Set drop_pg_slot to True if we want to sync the whole tap # This flag will be used by FastSync PG to (PG/SF/Redshift) self.drop_pg_slot = bool(not self.args.tables) # Some target attributes can be passed and override by tap (aka. inheritable config) # We merge the two configs and use that with the target cons_target_config = self.create_consumable_target_config(target_config, tap_inheritable_config) # Output will be redirected into target and tap specific log directory log_dir = self.get_tap_log_dir(target_id, tap_id) current_time = datetime.utcnow().strftime('%Y%m%d_%H%M%S') # sync_tables command always using fastsync try: with pidfile.PIDFile(self.tap['files']['pidfile']): self.tap_run_log_file = os.path.join(log_dir, f'{target_id}-{tap_id}-{current_time}.fastsync.log') # Create parameters as NamedTuples tap_params = TapParams( id=tap_id, type=tap_type, bin=self.tap_bin, python_bin=self.tap_python_bin, config=tap_config, properties=tap_properties, state=tap_state) target_params = TargetParams( id=target_id, type=target_type, bin=self.target_bin, python_bin=self.target_python_bin, config=cons_target_config ) transform_params = TransformParams( bin=self.transform_field_bin, config=tap_transformation, python_bin=self.transform_field_python_bin, tap_id=tap_id, target_id=target_id ) self.run_tap_fastsync(tap=tap_params, target=target_params, transform=transform_params) except pidfile.AlreadyRunningError: self.logger.error('Another instance of the tap is already running.') utils.silentremove(cons_target_config) sys.exit(1) # Delete temp file if there is any except commands.RunCommandException as exc: self.logger.exception(exc) utils.silentremove(cons_target_config) self.send_alert(message=f'Failed to sync tables in {tap_id} tap', exc=exc) sys.exit(1) except Exception as exc: utils.silentremove(cons_target_config) self.send_alert(message=f'Failed to sync tables in {tap_id} tap', exc=exc) raise exc utils.silentremove(cons_target_config) def validate(self): """ Validates a project directory with YAML tap and target files. 
""" yaml_dir = self.args.dir self.logger.info('Searching YAML config files in %s', yaml_dir) tap_yamls, target_yamls = utils.get_tap_target_names(yaml_dir) self.logger.info('Detected taps: %s', tap_yamls) self.logger.info('Detected targets: %s', target_yamls) target_schema = utils.load_schema('target') tap_schema = utils.load_schema('tap') vault_secret = self.args.secret target_ids = set() # Validate target json schemas and that no duplicate IDs exist for yaml_file in target_yamls: self.logger.info('Started validating %s', yaml_file) loaded_yaml = utils.load_yaml(os.path.join(yaml_dir, yaml_file), vault_secret) utils.validate(loaded_yaml, target_schema) if loaded_yaml['id'] in target_ids: self.logger.error('Duplicate target found "%s"', loaded_yaml['id']) sys.exit(1) target_ids.add(loaded_yaml['id']) self.logger.info('Finished validating %s', yaml_file) tap_ids = set() # Validate tap json schemas, check that every tap has valid 'target' and that no duplicate IDs exist for yaml_file in tap_yamls: self.logger.info('Started validating %s', yaml_file) loaded_yaml = utils.load_yaml(os.path.join(yaml_dir, yaml_file), vault_secret) utils.validate(loaded_yaml, tap_schema) if loaded_yaml['id'] in tap_ids: self.logger.error('Duplicate tap found "%s"', loaded_yaml['id']) sys.exit(1) if loaded_yaml['target'] not in target_ids: self.logger.error("Can'f find the target with the ID '%s' referenced in '%s'. Available target IDs: %s", loaded_yaml['target'], yaml_file, target_ids) sys.exit(1) tap_ids.add(loaded_yaml['id']) self.logger.info('Finished validating %s', yaml_file) self.logger.info('Validation successful') def import_project(self): """ Take a list of YAML files from a directory and use it as the source to build singer compatible json files and organise them into pipeline directory structure """ # Read the YAML config files and transform/save into singer compatible # JSON files in a common directory structure config = Config.from_yamls(self.config_dir, self.args.dir, self.args.secret) config.save() # Activating tap stream selections # # Run every tap in discovery mode to generate the singer specific # properties.json files for the taps. The properties file than # updated to replicate only the tables that is defined in the YAML # files and to use the required replication methods # # The tap Discovery mode needs to connect to each source databases and # doing that sequentially is slow. For a better performance we do it # in parallel. 
self.logger.info('ACTIVATING TAP STREAM SELECTIONS...') total_targets = 0 total_taps = 0 discover_excs = [] # Import every tap from every target start_time = datetime.now() for target in config.targets.values(): total_targets += 1 total_taps += len(target.get('taps')) with parallel_backend('threading', n_jobs=-1): # Discover taps in parallel and return the list of exception of the failed ones discover_excs.extend(list(filter(None, Parallel(verbose=100)(delayed(self.discover_tap)( tap=tap, target=target ) for tap in target.get('taps'))))) # Log summary end_time = datetime.now() # pylint: disable=logging-too-many-args self.logger.info( """ ------------------------------------------------------- IMPORTING YAML CONFIGS FINISHED ------------------------------------------------------- Total targets to import : %s Total taps to import : %s Taps imported successfully : %s Taps failed to import : %s Runtime : %s ------------------------------------------------------- """, total_targets, total_taps, total_taps - len(discover_excs), str(discover_excs), end_time - start_time ) if len(discover_excs) > 0: sys.exit(1) def encrypt_string(self): """ Encrypt the supplied string using the provided vault secret """ b_ciphertext = utils.vault_encrypt(self.args.string, self.args.secret) yaml_text = utils.vault_format_ciphertext_yaml(b_ciphertext) print(yaml_text) print('Encryption successful') def _is_initial_sync_required(self, replication_method: str, stream_bookmark: Dict) -> bool: """ Detects if a stream needs initial sync or not. Initial sync is required for INCREMENTAL and LOG_BASED tables where the state file has no valid bookmark. Valid bookmark keys: 'replication_key_value' key created for INCREMENTAL tables 'log_pos' key created by MySQL LOG_BASED tables 'lsn' key created by PostgreSQL LOG_BASED tables 'modified_since' key created by CSV S3 INCREMENTAL tables 'token' key created by MongoDB LOG_BASED tables FULL_TABLE replication method is taken as initial sync required :param replication_method: stream replication method :param stream_bookmark: stream state bookmark :return: Boolean, True if needs initial sync, False otherwise """ return replication_method == self.FULL_TABLE \ or (replication_method == self.INCREMENTAL and 'replication_key_value' not in stream_bookmark and 'modified_since' not in stream_bookmark) \ or (replication_method == self.LOG_BASED and 'lsn' not in stream_bookmark and 'log_pos' not in stream_bookmark and 'token' not in stream_bookmark) # pylint: disable=unused-argument def _exit_gracefully(self, sig, frame, exit_code=1): self.logger.info('Stopping gracefully...') # Rename log files from running to terminated status if self.tap_run_log_file: tap_run_log_file_running = f'{self.tap_run_log_file}.running' tap_run_log_file_terminated = f'{self.tap_run_log_file}.terminated' if os.path.isfile(tap_run_log_file_running): os.rename(tap_run_log_file_running, tap_run_log_file_terminated) sys.exit(exit_code) def _print_tap_run_summary(self, status, start_time, end_time): summary = f""" ------------------------------------------------------- TAP RUN SUMMARY ------------------------------------------------------- Status : {status} Runtime : {end_time - start_time} ------------------------------------------------------- """ # Print summary to stdout self.logger.info(summary) # Add summary to tap run log file if self.tap_run_log_file: tap_run_log_file_success = f'{self.tap_run_log_file}.success' tap_run_log_file_failed = f'{self.tap_run_log_file}.failed' # Find which log file we need to write 
the summary log_file_to_write_summary = None if os.path.isfile(tap_run_log_file_success): log_file_to_write_summary = tap_run_log_file_success elif os.path.isfile(tap_run_log_file_failed): log_file_to_write_summary = tap_run_log_file_failed # Append the summary to the right log file if log_file_to_write_summary: with open(log_file_to_write_summary, 'a') as logfile: logfile.write(summary) # pylint: disable=unused-variable def _run_post_import_tap_checks(self, tap: Dict, catalog: Dict, target_id: str) -> List: """ Run post import checks on a tap. :param tap: dictionary containing all taps details :param catalog: tap properties object :param target_id: ID of the target used by the tap :return: List of errors. If no error returns an empty list """ errors = [] error = self.__validate_transformations( tap.get('files', {}).get('transformation'), catalog, tap['id'], target_id) if error: errors.append(error) # Foreach stream (table) in the original properties for stream_idx, stream in enumerate(catalog.get('streams', catalog)): # Collect required properties from the properties file tap_stream_id = stream.get('tap_stream_id') metadata = stream.get('metadata', []) # Collect further properties from the tap and target properties table_meta = {} for meta_idx, meta in enumerate(metadata): if isinstance(meta, dict) and len(meta.get('breadcrumb', [])) == 0: table_meta = meta.get('metadata') break selected = table_meta.get('selected', False) replication_method = table_meta.get('replication-method') table_key_properties = table_meta.get('table-key-properties', []) primary_key_required = tap.get('primary_key_required', True) # Check if primary key is set for INCREMENTAL and LOG_BASED replications if (selected and replication_method in [self.INCREMENTAL, self.LOG_BASED] and len(table_key_properties) == 0 and primary_key_required): errors.append(f'No primary key set for {tap_stream_id} stream ({replication_method})') break return errors def __validate_transformations( self, transformation_file: str, catalog: Dict, tap_id: str, target_id: str) -> Optional[str]: """ Run validation of transformation config Args: transformation_file: path to transformation config catalog: Catalog object tap_id: The ID of the tap to which the transformations belong target_id: the ID of the target used by the tap Returns: error as string """ if transformation_file: # create a temp file with the content being the given catalog object # we need this file to execute the validation cli command temp_catalog_file = utils.create_temp_file(dir=self.get_temp_dir(), prefix='properties_', suffix='.json')[1] utils.save_json(catalog, temp_catalog_file) command = f""" {self.transform_field_bin} --validate --config {transformation_file} --catalog {temp_catalog_file} """ if self.profiling_mode: dump_file = os.path.join(self.profiling_dir, f'transformation_{tap_id}_{target_id}.pstat') command = f'{self.transform_field_python_bin} -m cProfile -o {dump_file} {command}' self.logger.debug('Transformation validation command: %s', command) result = commands.run_command(command) # Get output and errors from command returncode, _, stderr = result if returncode != 0: return stderr
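
# ---------------------------------------------------------------------------
# Illustrative sketch (appended for clarity, not part of the original module):
# the initial-sync rule documented in _is_initial_sync_required above,
# rewritten as a standalone function so it can be exercised without a full
# PipelineWise instance. The FULL_TABLE/INCREMENTAL/LOG_BASED constants are
# assumed string values used only for this example.
# ---------------------------------------------------------------------------
FULL_TABLE = 'FULL_TABLE'
INCREMENTAL = 'INCREMENTAL'
LOG_BASED = 'LOG_BASED'


def initial_sync_required(replication_method, stream_bookmark):
    """FULL_TABLE always requires a resync; INCREMENTAL and LOG_BASED only
    when the state file has no usable bookmark key for that method."""
    incremental_keys = ('replication_key_value', 'modified_since')
    log_based_keys = ('lsn', 'log_pos', 'token')
    if replication_method == FULL_TABLE:
        return True
    if replication_method == INCREMENTAL:
        return not any(key in stream_bookmark for key in incremental_keys)
    if replication_method == LOG_BASED:
        return not any(key in stream_bookmark for key in log_based_keys)
    return False


if __name__ == '__main__':
    # Quick checks of the rule described in the docstring above.
    assert initial_sync_required(FULL_TABLE, {'lsn': 123})
    assert not initial_sync_required(LOG_BASED, {'lsn': 123})
    assert initial_sync_required(INCREMENTAL, {})  # no bookmark yet
    assert not initial_sync_required(INCREMENTAL, {'replication_key_value': 42})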
the-stack_0_8221
import pytest from plenum.common.exceptions import RequestRejectedException, \ RequestNackedException from indy_common.constants import POOL_RESTART, ACTION, START, DATETIME from plenum.common.constants import TXN_TYPE from plenum.test.helper import sdk_gen_request, sdk_sign_and_submit_req_obj, \ sdk_get_reply, sdk_get_and_check_replies def test_fail_pool_restart_with_steward_role( sdk_pool_handle, sdk_wallet_steward, looper): op = { TXN_TYPE: POOL_RESTART, ACTION: START, } req_obj = sdk_gen_request(op, identifier=sdk_wallet_steward[1]) req = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet_steward, req_obj) with pytest.raises(RequestRejectedException) as excinfo: sdk_get_and_check_replies(looper, [req], 100) assert excinfo.match("STEWARD cannot do action with type = " + POOL_RESTART) def test_fail_pool_restart_with_invalid_datetime( sdk_pool_handle, sdk_wallet_steward, looper): invalid_datetime = "12.05.2018 4/40" op = { TXN_TYPE: POOL_RESTART, ACTION: START, DATETIME: invalid_datetime } req_obj = sdk_gen_request(op, identifier=sdk_wallet_steward[1]) req = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet_steward, req_obj) with pytest.raises(RequestNackedException) as excinfo: sdk_get_and_check_replies(looper, [req], 100) assert excinfo.match("datetime " + invalid_datetime + " is not valid")
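
# ---------------------------------------------------------------------------
# Companion test sketch (illustration only, not from the original file): the
# happy path with a TRUSTEE wallet and a well-formed datetime. It assumes a
# `sdk_wallet_trustee` fixture exists in this suite and that TRUSTEE is
# authorized for POOL_RESTART; adjust the fixtures and timestamp format to the
# suite's conventions before relying on it.
# ---------------------------------------------------------------------------
def test_pool_restart_with_trustee_role_sketch(
        sdk_pool_handle, sdk_wallet_trustee, looper):
    from datetime import datetime, timedelta
    restart_at = (datetime.utcnow() + timedelta(minutes=5)).isoformat() + '+00:00'
    op = {
        TXN_TYPE: POOL_RESTART,
        ACTION: START,
        DATETIME: restart_at,
    }
    req_obj = sdk_gen_request(op, identifier=sdk_wallet_trustee[1])
    req = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                      sdk_wallet_trustee, req_obj)
    # A valid request from an authorized role should be answered without a
    # reject or nack being raised.
    sdk_get_and_check_replies(looper, [req], 100)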
the-stack_0_8224
# -*- coding: utf-8 -*- from __future__ import division import numpy as np import pandas as pd from scipy.stats import multivariate_normal from pgmpy.factors.base import BaseFactor class LinearGaussianCPD(BaseFactor): """ For, X -> Y the Linear Gaussian model assumes that the mean of Y is a linear function of mean of X and the variance of Y does not depend on X. For example, $ p(Y|X) = N(-2x + 0.9 ; 1) $ Here, $ x $ is the mean of the variable $ X $. Let $ Y $ be a continuous variable with continuous parents $ X1, X2, ..., Xk $. We say that $ Y $ has a linear Gaussian CPD if there are parameters $ \beta_0, \beta_1, ..., \beta_k $ and $ \sigma_2 $ such that, $ p(Y |x1, x2, ..., xk) = \mathcal{N}(\beta_0 + x1*\beta_1 + ......... + xk*\beta_k ; \sigma_2) $ In vector notation, $ p(Y |x) = \mathcal{N}(\beta_0 + \boldmath{β}.T * \boldmath{x} ; \sigma_2) $ Reference: https://cedar.buffalo.edu/~srihari/CSE574/Chap8/Ch8-PGM-GaussianBNs/8.5%20GaussianBNs.pdf """ def __init__( self, variable, evidence_mean, evidence_variance, evidence=[], beta=None ): """ Parameters ---------- variable: any hashable python object The variable whose CPD is defined. evidence_mean: Mean vector (numpy array) of the joint distribution, X evidence_variance: int, float The variance of the multivariate gaussian, X = ['x1', 'x2', ..., 'xn'] evidence: iterable of any hashabale python objects An iterable of the parents of the variable. None if there are no parents. beta (optional): iterable of int or float An iterable representing the coefficient vector of the linear equation. The first term represents the constant term in the linear equation. Examples -------- # For P(Y| X1, X2, X3) = N(-2x1 + 3x2 + 7x3 + 0.2; 9.6) >>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3']) >>> cpd.variable 'Y' >>> cpd.evidence ['x1', 'x2', 'x3'] >>> cpd.beta_vector [0.2, -2, 3, 7] """ self.variable = variable self.mean = evidence_mean self.variance = evidence_variance self.evidence = evidence self.sigma_yx = None if beta is not None: self.beta = beta self.beta_0 = beta[0] self.beta_vector = np.asarray(beta[1:]) if len(evidence) != len(beta) - 1: raise ValueError( "The number of variables in evidence must be one less than the length of the beta vector." ) variables = [variable] + evidence super(LinearGaussianCPD, self).__init__( variables, pdf="gaussian", mean=self.mean, covariance=self.variance ) def sum_of_product(self, xi, xj): prod_xixj = xi * xj return np.sum(prod_xixj) def maximum_likelihood_estimator(self, data, states): """ Fit using MLE method. Parameters ---------- data: pandas.DataFrame or 2D array Dataframe of values containing samples from the conditional distribution, (Y|X) and corresponding X values. states: All the input states that are jointly gaussian. Returns ------- beta, variance (tuple): Returns estimated betas and the variance. """ x_df = pd.DataFrame(data, columns=states) x_len = len(self.evidence) sym_coefs = [] for i in range(0, x_len): sym_coefs.append("b" + str(i + 1) + "_coef") sum_x = x_df.sum() x = [sum_x["(Y|X)"]] coef_matrix = pd.DataFrame(columns=sym_coefs) # First we compute just the coefficients of beta_1 to beta_N. # Later we compute beta_0 and append it. 
for i in range(0, x_len): x.append(self.sum_of_product(x_df["(Y|X)"], x_df[self.evidence[i]])) for j in range(0, x_len): coef_matrix.loc[i, sym_coefs[j]] = self.sum_of_product( x_df[self.evidence[i]], x_df[self.evidence[j]] ) coef_matrix.insert(0, "b0_coef", sum_x[self.evidence].values) row_1 = np.append([len(x_df)], sum_x[self.evidence].values) coef_matrix.loc[-1] = row_1 coef_matrix.index = coef_matrix.index + 1 # shifting index coef_matrix.sort_index(inplace=True) beta_coef_matrix = np.matrix(coef_matrix.values, dtype="float") coef_inv = np.linalg.inv(beta_coef_matrix) beta_est = np.array(np.matmul(coef_inv, np.transpose(x))) self.beta = beta_est[0] sigma_est = 0 x_len_df = len(x_df) for i in range(0, x_len): for j in range(0, x_len): sigma_est += ( self.beta[i + 1] * self.beta[j + 1] * ( self.sum_of_product( x_df[self.evidence[i]], x_df[self.evidence[j]] ) / x_len_df - np.mean(x_df[self.evidence[i]]) * np.mean(x_df[self.evidence[j]]) ) ) sigma_est = np.sqrt( self.sum_of_product(x_df["(Y|X)"], x_df["(Y|X)"]) / x_len_df - np.mean(x_df["(Y|X)"]) * np.mean(x_df["(Y|X)"]) - sigma_est ) self.sigma_yx = sigma_est return self.beta, self.sigma_yx def fit(self, data, states, estimator=None, complete_samples_only=True, **kwargs): """ Determine βs from data Parameters ---------- data: pandas.DataFrame Dataframe containing samples from the conditional distribution, p(Y|X) estimator: 'MLE' or 'MAP' completely_samples_only: boolean (True or False) Are they downsampled or complete? Defaults to True """ if estimator == "MLE": mean, variance = self.maximum_likelihood_estimator(data, states) elif estimator == "MAP": raise NotImplementedError( "fit method has not been implemented using Maximum A-Priori (MAP)" ) return mean, variance @property def pdf(self): def _pdf(*args): # The first element of args is the value of the variable on which CPD is defined # and the rest of the elements give the mean values of the parent # variables. mean = ( sum([arg * coeff for (arg, coeff) in zip(args[1:], self.beta_vector)]) + self.beta_0 ) return multivariate_normal.pdf( args[0], np.array(mean), np.array([[self.variance]]) ) return _pdf def copy(self): """ Returns a copy of the distribution. Returns ------- LinearGaussianCPD: copy of the distribution Examples -------- >>> from pgmpy.factors.continuous import LinearGaussianCPD >>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3']) >>> copy_cpd = cpd.copy() >>> copy_cpd.variable 'Y' >>> copy_cpd.evidence ['X1', 'X2', 'X3'] """ copy_cpd = LinearGaussianCPD( self.variable, self.beta, self.variance, list(self.evidence) ) return copy_cpd def __str__(self): if self.evidence and list(self.beta_vector): # P(Y| X1, X2, X3) = N(-2*X1_mu + 3*X2_mu + 7*X3_mu; 0.2) rep_str = "P({node} | {parents}) = N({mu} + {b_0}; {sigma})".format( node=str(self.variable), parents=", ".join([str(var) for var in self.evidence]), mu=" + ".join( [ "{coeff}*{parent}".format(coeff=coeff, parent=parent) for coeff, parent in zip(self.beta_vector, self.evidence) ] ), b_0=str(self.beta_0), sigma=str(self.variance), ) else: # P(X) = N(1, 4) rep_str = "P({X}) = N({beta_0}; {variance})".format( X=str(self.variable), beta_0=str(self.beta_0), variance=str(self.variance), ) return rep_str
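
# ---------------------------------------------------------------------------
# Worked example (appended for illustration, not part of pgmpy): the linear
# Gaussian density p(Y | x1, x2, x3) = N(beta_0 + beta.T x ; sigma^2) from the
# class docstring, evaluated directly with scipy for beta = [0.2, -2, 3, 7]
# and variance 9.6. The parent values below are arbitrary example inputs.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from scipy.stats import norm

    beta_0, beta_vec = 0.2, np.array([-2.0, 3.0, 7.0])
    variance = 9.6
    x = np.array([1.0, 2.0, 0.5])      # parent values x1, x2, x3

    mean_y = beta_0 + beta_vec.dot(x)  # 0.2 - 2*1 + 3*2 + 7*0.5 = 7.7
    density_at_8 = norm.pdf(8.0, loc=mean_y, scale=np.sqrt(variance))
    print(f"E[Y|x] = {mean_y:.2f}, p(Y=8 | x) = {density_at_8:.4f}")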
the-stack_0_8226
# Copyright 2019, OpenCensus Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import re from copy import copy logger = logging.getLogger(__name__) OC_RESOURCE_TYPE = 'OC_RESOURCE_TYPE' OC_RESOURCE_LABELS = 'OC_RESOURCE_LABELS' # Matches anything outside ASCII 32-126 inclusive _NON_PRINTABLE_ASCII = re.compile( r'[^ !"#$%&\'()*+,\-./:;<=>?@\[\\\]^_`{|}~0-9a-zA-Z]') # Label key/value tokens, may be quoted _WORD_RES = r'(\'[^\']*\'|"[^"]*"|[^\s,=]+)' _KV_RE = re.compile(r""" \s* # ignore leading spaces (?P<key>{word_re}) # capture the key word \s*=\s* (?P<val>{word_re}) # capture the value word \s* # ignore trailing spaces """.format(word_re=_WORD_RES), re.VERBOSE) _LABELS_RE = re.compile(r""" ^\s*{word_re}\s*=\s*{word_re}\s* # _KV_RE without the named groups (,\s*{word_re}\s*=\s*{word_re}\s*)* # more KV pairs, comma delimited $ """.format(word_re=_WORD_RES), re.VERBOSE) _UNQUOTE_RE = re.compile(r'^([\'"]?)([^\1]*)(\1)$') def merge_resources(resource_list): """Merge multiple resources to get a new resource. Resources earlier in the list take precedence: if multiple resources share a label key, use the value from the first resource in the list with that key. The combined resource's type will be the first non-null type in the list. :type resource_list: list(:class:`Resource`) :param resource_list: The list of resources to combine. :rtype: :class:`Resource` :return: The new combined resource. """ if not resource_list: raise ValueError rtype = None for rr in resource_list: if rr.type: rtype = rr.type break labels = {} for rr in reversed(resource_list): labels.update(rr.labels) return Resource(rtype, labels) def check_ascii_256(string): """Check that `string` is printable ASCII and at most 256 chars. Raise a `ValueError` if this check fails. Note that `string` itself doesn't have to be ASCII-encoded. :type string: str :param string: The string to check. """ if string is None: return if len(string) > 256: raise ValueError("Value is longer than 256 characters") bad_char = _NON_PRINTABLE_ASCII.search(string) if bad_char: raise ValueError(u'Character "{}" at position {} is not printable ' 'ASCII' .format( string[bad_char.start():bad_char.end()], bad_char.start())) class Resource(object): """A description of the entity for which signals are reported. `type_` and `labels`' keys and values should contain only printable ASCII and should be at most 256 characters. See: https://github.com/census-instrumentation/opencensus-specs/blob/master/resource/Resource.md :type type_: str :param type_: The resource type identifier. :type labels: dict :param labels: Key-value pairs that describe the entity. 
""" # noqa def __init__(self, type_=None, labels=None): if type_ is not None and not type_: raise ValueError("Resource type must not be empty") check_ascii_256(type_) if labels is None: labels = {} for key, value in labels.items(): if not key: raise ValueError("Resource key must not be null or empty") if value is None: raise ValueError("Resource value must not be null") check_ascii_256(key) check_ascii_256(value) self.type = type_ self.labels = copy(labels) def get_type(self): """Get this resource's type. :rtype: str :return: The resource's type. """ return self.type def get_labels(self): """Get this resource's labels. :rtype: dict :return: The resource's label dict. """ return copy(self.labels) def merge(self, other): """Get a copy of this resource combined with another resource. The combined resource will have the union of both resources' labels, keeping this resource's label values if they conflict. :type other: :class:`Resource` :param other: The other resource to merge. :rtype: :class:`Resource` :return: The new combined resource. """ return merge_resources([self, other]) def unquote(string): """Strip quotes surrounding `string` if they exist. >>> unquote('abc') 'abc' >>> unquote('"abc"') 'abc' >>> unquote("'abc'") 'abc' >>> unquote('"a\\'b\\'c"') "a'b'c" """ return _UNQUOTE_RE.sub(r'\2', string) def parse_labels(labels_str): """Parse label keys and values following the Resource spec. >>> parse_labels("k=v") {'k': 'v'} >>> parse_labels("k1=v1, k2=v2") {'k1': 'v1', 'k2': 'v2'} >>> parse_labels("k1='v1,=z1'") {'k1': 'v1,=z1'} """ if not _LABELS_RE.match(labels_str): return None labels = {} for kv in _KV_RE.finditer(labels_str): gd = kv.groupdict() key = unquote(gd['key']) if key in labels: logger.warning('Duplicate label key "%s"', key) labels[key] = unquote(gd['val']) return labels def get_from_env(): """Get a Resource from environment variables. :rtype: :class:`Resource` :return: A resource with type and labels from the environment. """ type_env = os.getenv(OC_RESOURCE_TYPE) if type_env is None: return None type_env = type_env.strip() labels_env = os.getenv(OC_RESOURCE_LABELS) if labels_env is None: return Resource(type_env) labels = parse_labels(labels_env) return Resource(type_env, labels)
the-stack_0_8229
import matplotlib.pyplot as plt import numpy as np import vae.training import vae.cvae_model import vae.regression_model import vae.dataman import torch import torch.nn as nn dataset = vae.dataman.DataManager( mappings={ 'sigma': None, 'albedo': lambda a: np.power(1 - a, 1.0 / 6), 'g': None, # 'logscat': None, 'output_z': None, 'output_b': None, 'output_a': None }, blocks=(3, 3) ) dataset.load_file("./DataSets/SphereScattersDataSet.npz") #, limit=1024*1024) print('Loaded data... '+str(dataset.data.shape)) def test_dataset(sigma, albedo, g): test_data = dataset.get_filtered_data({ 'sigma': (sigma - 1, sigma + 2), 'albedo': (albedo - 0.001, albedo), 'g': (g - 0.2, g + 0.2) }) if len(test_data) == 0: print(f'[ERROR] Config {sigma},{albedo},{g} has no data.') return print(len(test_data)) plt.figure() # drawing histograms from empirical z position distribution z_pos = test_data[:, 3].cpu().numpy() plt.hist(z_pos, density=True, bins=80) # test_dataset(16.0, 0.99, 0.0) # plt.show() # exit() def cvae_factory(epochs, batch_size): print('Creating CVAE model...') model = vae.cvae_model.CVAEModel(3, 3, 4, 8, 4, activation=nn.LeakyReLU) optimizer = torch.optim.AdamW(model.parameters(), lr=0.001) # optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.0000001) gamma = np.exp(np.log(0.02) / epochs) scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma) return model, optimizer, scheduler path = "./Running/sphere_cvae" # vae.training.clear_training(path) # comment if you want to reuse from previous training state = vae.training.start_training(path, dataset, cvae_factory, batch_size=128*1024, epochs=8000) print('Training finished at epoch ...'+str(len(state.history))) loss_history = np.array([h[0] for h in state.history]) epoch_list = np.arange(0, len(loss_history), 1) plt.figure() plt.plot(epoch_list, loss_history) # testing model evaluation dataset = vae.dataman.DataManager( mappings={ 'sigma': None, 'albedo': lambda a: np.power(1 - a, 1.0 / 6), 'g': None, 'output_z': None, 'output_b': None, 'output_a': None, 'logscat': None, }, blocks=(3, 1, 3) ) dataset.load_file("./DataSets/Test_SphereScattersDataSet.npz") dataset.set_device(vae.training.DEFAULT_DEVICE) print('Loaded data for testing... 
'+str(dataset.data.shape)) state.model.train(False) def test_setting(sigma, albedo, g): test_data = dataset.get_filtered_data({ 'sigma': (sigma - .5, sigma + .5), 'albedo': (albedo - 0.0001, albedo), 'g': (g - 0.01, g + 0.01) }) print('Testing data frame '+str(test_data.shape)) if len(test_data) == 0: print(f'[ERROR] Config {sigma},{albedo},{g} has no data.') return plt.figure() # drawing histograms from empirical z position distribution weights = np.exp(test_data[:, 6].cpu().numpy()) z_pos = test_data[:, 3].cpu().numpy() plt.hist(z_pos, weights=weights, density=True, bins=80) # plt.hist(z_pos, density=True, bins=80) # drawing histograms from model sampling distribution internal_albedo = np.power(1.0 - albedo, 1.0/6) sigma_test = torch.Tensor(10000, 1).fill_(sigma).to(vae.training.DEFAULT_DEVICE) albedo_test = torch.Tensor(10000, 1).fill_(internal_albedo).to(vae.training.DEFAULT_DEVICE) g_test = torch.Tensor(10000, 1).fill_(g).to(vae.training.DEFAULT_DEVICE) y_test = state.model.conditional_sampling(torch.cat([sigma_test, albedo_test, g_test], dim=1)) plt.hist(torch.clamp(y_test[:, 0], -1, 1).cpu().detach().numpy(), density=True, bins=80, histtype='step') plt.show() testing_sigmas = [1.0, 4.0, 9.0, 20.0] testing_albedos = [0.95] # testing_albedos = [1.0, 0.999, 0.95, 0.8] testing_gs = [-0.5, 0.0, 0.7, 0.875] for sigma in testing_sigmas: for albedo in testing_albedos: for g in testing_gs: test_setting(sigma, albedo, g) # testing model evaluation plt.show()
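
# ---------------------------------------------------------------------------
# Side note (illustrative addition, not part of the original script): the
# exponential LR factor used in cvae_factory, gamma = exp(log(0.02) / epochs),
# is chosen so that after `epochs` scheduler steps the learning rate decays to
# exactly 2% of its starting value. Quick numerical check for the settings
# used above (epochs=8000, initial lr=0.001):
# ---------------------------------------------------------------------------
_check_epochs = 8000
_check_gamma = np.exp(np.log(0.02) / _check_epochs)
print('gamma^epochs =', _check_gamma ** _check_epochs)         # ~0.02
print('final lr     =', 0.001 * _check_gamma ** _check_epochs)  # ~2e-5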
the-stack_0_8232
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Intel Corporation
"""REST interface to model_runner."""
import io
from logging import exception

import sanic
from sanic import response

from logger import logger
from model_hub import ModelLoader
from model_hub import ImageProcessor
from model_hub import ModelRunner

app = sanic.Sanic("dlrs-torchub")
model_loader = ModelLoader()


@app.route("/")
async def index(request):
    """index"""
    return response.json(
        {
            "info": "torch hub server on dlrs",
            "urls": ["/", "/ping", "/serve", "/predict"],
        }
    )


@app.route("/ping")
async def ping(request):
    """heartbeat."""
    return response.json({"status": "ok"})


@app.route("/serve", methods=["POST"])
async def load_model(request):
    """load model using process pool."""
    global model_loader
    req = request.json
    if req is None:
        return response.json(
            {"status": "fail", "result": "model param json not provided"}
        )
    if req.get("path", None) and req.get("name", None):
        if not model_loader:
            model_loader = ModelLoader()
        model_loader.init_model(req)
        if model_loader.loaded:
            return response.json(
                {"status": "ok", "result": f"model {req['name']} loaded"}
            )
        try:
            request.app.loop.run_in_executor(None, model_loader.load_model)
        except RuntimeError:
            return response.json(
                {"status": "fail", "result": "model or path not retrievable"}
            )
        return response.json(
            {"status": "ok", "result": f"model {req['name']} loading in progress"}
        )
    else:
        model_loader = None
        raise sanic.exceptions.SanicException(
            "model_path/model_name not given", status_code=401
        )


@app.route("/predict", methods=["POST"])
async def predict(request):
    """return output of the model."""
    img_str = request.files["img"]
    image = io.BytesIO(img_str[0].body)
    img_processor = ImageProcessor(image)
    img_tensor = img_processor.transform()
    try:
        model_runner = ModelRunner(model_loader.model, img_tensor)
    except AttributeError:
        return response.json(
            {"status": "failed", "result": "model not initiated, use serve model API"}
        )
    return response.json({"status": "ok", "result": model_runner.predict()})


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5550)
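

# ---------------------------------------------------------------------------
# Client-side usage sketch (appended for illustration, not part of the server
# module). It only exercises the routes defined above; the torch.hub repo
# path, model name and image filename are placeholders, and the `requests`
# package is assumed to be available on the client.
# ---------------------------------------------------------------------------
def _example_client_session(base_url="http://localhost:5550"):
    """Drive /ping, /serve and /predict from a separate process."""
    import requests

    print(requests.get(f"{base_url}/ping").json())  # {"status": "ok"}
    print(requests.post(f"{base_url}/serve", json={
        "path": "pytorch/vision:v0.10.0",  # placeholder hub repo
        "name": "resnet18",                # placeholder model name
    }).json())
    with open("cat.jpg", "rb") as img:     # any local test image
        print(requests.post(f"{base_url}/predict", files={"img": img}).json())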
the-stack_0_8236
# IMPORTATION STANDARD # IMPORTATION THIRDPARTY import requests import pandas as pd import pytest # IMPORTATION INTERNAL from gamestonk_terminal.stocks.due_diligence import ark_model @pytest.fixture(scope="module") def vcr_config(): return { "filter_headers": [("User-Agent", None)], "filter_query_parameters": [ ("period1", "1598220000"), ("period2", "1635980400"), ], } @pytest.mark.default_cassette("test_get_ark_trades_by_ticker_TSLA") @pytest.mark.vcr def test_get_ark_trades_by_ticker(recorder): result_df = ark_model.get_ark_trades_by_ticker(ticker="TSLA") recorder.capture(result_df) @pytest.mark.default_cassette("test_get_ark_trades_by_ticker_INVALID_TICKER") @pytest.mark.vcr def test_get_ark_trades_by_ticker_invalid_ticker(): result_df = ark_model.get_ark_trades_by_ticker(ticker="INVALID_TICKER") assert result_df.empty @pytest.mark.default_cassette("test_get_ark_trades_by_ticker_TSLA") @pytest.mark.vcr(record_mode="none") def test_get_ark_trades_by_ticker_invalid_json(mocker): mocker.patch( target="json.loads", new=mocker.Mock( return_value={ "props": { "pageProps": [], } } ), ) result_df = ark_model.get_ark_trades_by_ticker(ticker="TSLA") assert result_df.empty @pytest.mark.vcr(record_mode="none") def test_get_ark_trades_by_ticker_invalid_status(mocker): mock_response = requests.Response() mock_response.status_code = 400 mocker.patch( target="requests.get", new=mocker.Mock(return_value=mock_response), ) result_df = ark_model.get_ark_trades_by_ticker(ticker="TSLA") assert result_df.empty @pytest.mark.default_cassette("test_get_ark_trades_by_ticker_TSLA") @pytest.mark.vcr(record_mode="none") def test_get_ark_trades_by_ticker_json_normalize(mocker): mock_df = pd.DataFrame() mocker.patch( target="pandas.json_normalize", new=mocker.Mock(return_value=mock_df), ) result_df = ark_model.get_ark_trades_by_ticker(ticker="TSLA") assert result_df.empty
the-stack_0_8238
# Python libraries import argparse import os # Lib files import lib.medloaders as medical_loaders import lib.medzoo as medzoo import lib.train as train import lib.utils as utils from lib.losses3D import DiceLoss from lib.visual3D_temp import * os.environ["CUDA_VISIBLE_DEVICES"] = "0,2" seed = 1777777 def main(): args = get_arguments() utils.reproducibility(args, seed) utils.make_dirs(args.save) training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(args, path='.././datasets') model, optimizer = medzoo.create_model(args) criterion = DiceLoss(classes=args.classes) if args.cuda: model = model.cuda() print("Model transferred in GPU.....") trainer = train.Trainer(args, model, criterion, optimizer, train_data_loader=training_generator, valid_data_loader=val_generator, lr_scheduler=None) print("START TRAINING...") trainer.training() visualize_3D_no_overlap_new(args, full_volume, affine, model, 10, args.dim) def get_arguments(): parser = argparse.ArgumentParser() parser.add_argument('--batchSz', type=int, default=2) parser.add_argument('--dataset_name', type=str, default="mrbrains4") parser.add_argument('--dim', nargs="+", type=int, default=(64, 64, 32)) parser.add_argument('--nEpochs', type=int, default=1) parser.add_argument('--classes', type=int, default=4) parser.add_argument('--samples_train', type=int, default=20) parser.add_argument('--samples_val', type=int, default=20) parser.add_argument('--inChannels', type=int, default=3) parser.add_argument('--inModalities', type=int, default=3) parser.add_argument('--fold_id', default='1', type=str, help='Select subject for fold validation') parser.add_argument('--lr', default=1e-3, type=float, help='learning rate (default: 1e-3)') parser.add_argument('--cuda', action='store_true', default=False) parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--model', type=str, default='DENSENET3', choices=('VNET', 'VNET2', 'UNET3D', 'DENSENET1', 'DENSENET2', 'DENSENET3', 'HYPERDENSENET')) parser.add_argument('--opt', type=str, default='sgd', choices=('sgd', 'adam', 'rmsprop')) parser.add_argument('--log_dir', type=str, default='../runs/') args = parser.parse_args() args.save = '/data/hejy/MedicalZooPytorch/saved_models/' + args.model + '_checkpoints/' + args.model + '_{}_{}_'.format( utils.datestr(), args.dataset_name) return args if __name__ == '__main__': main()
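
# ---------------------------------------------------------------------------
# Example invocation (illustrative, not from the original repo). The script
# name is a placeholder; substitute the actual file name, and adjust the GPU
# flags and dataset paths to the local setup. All options shown map directly
# to the argparse flags defined in get_arguments() above.
# ---------------------------------------------------------------------------
# python train_mrbrains.py --batchSz 4 --dataset_name mrbrains4 --dim 64 64 32 \
#     --nEpochs 50 --classes 4 --inChannels 3 --inModalities 3 \
#     --model DENSENET3 --opt sgd --lr 1e-3 --cuda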
the-stack_0_8240
#@+leo-ver=5-thin #@+node:ekr.20031218072017.3603: * @file leoUndo.py '''Leo's undo/redo manager.''' #@+<< How Leo implements unlimited undo >> #@+node:ekr.20031218072017.2413: ** << How Leo implements unlimited undo >> #@+at Think of the actions that may be Undone or Redone as a string of beads # (g.Bunches) containing all information needed to undo _and_ redo an operation. # # A bead pointer points to the present bead. Undoing an operation moves the bead # pointer backwards; redoing an operation moves the bead pointer forwards. The # bead pointer points in front of the first bead when Undo is disabled. The bead # pointer points at the last bead when Redo is disabled. # # The Undo command uses the present bead to undo the action, then moves the bead # pointer backwards. The Redo command uses the bead after the present bead to redo # the action, then moves the bead pointer forwards. The list of beads does not # branch; all undoable operations (except the Undo and Redo commands themselves) # delete any beads following the newly created bead. # # New in Leo 4.3: User (client) code should call u.beforeX and u.afterX methods to # create a bead describing the operation that is being performed. (By convention, # the code sets u = c.undoer for undoable operations.) Most u.beforeX methods # return 'undoData' that the client code merely passes to the corresponding # u.afterX method. This data contains the 'before' snapshot. The u.afterX methods # then create a bead containing both the 'before' and 'after' snapshots. # # New in Leo 4.3: u.beforeChangeGroup and u.afterChangeGroup allow multiple calls # to u.beforeX and u.afterX methods to be treated as a single undoable entry. See # the code for the Replace All, Sort, Promote and Demote commands for examples. # u.before/afterChangeGroup substantially reduce the number of u.before/afterX # methods needed. # # New in Leo 4.3: It would be possible for plugins or other code to define their # own u.before/afterX methods. Indeed, u.afterX merely needs to set the # bunch.undoHelper and bunch.redoHelper ivars to the methods used to undo and redo # the operation. See the code for the various u.before/afterX methods for # guidance. # # New in Leo 4.3: p.setDirty and p.setAllAncestorAtFileNodesDirty now return a # 'dirtyVnodeList' that all vnodes that became dirty as the result of an # operation. More than one list may be generated: client code is responsible for # merging lists using the pattern dirtyVnodeList.extend(dirtyVnodeList2) # # I first saw this model of unlimited undo in the documentation for Apple's Yellow Box classes. #@-<< How Leo implements unlimited undo >> import leo.core.leoGlobals as g # pylint: disable=unpacking-non-sequence #@+others #@+node:ekr.20031218072017.3605: ** class Undoer class Undoer(object): """A class that implements unlimited undo and redo.""" # pylint: disable=not-an-iterable # pylint: disable=unsubscriptable-object # So that ivars can be inited to None rather thatn []. #@+others #@+node:ekr.20150509193307.1: *3* u.Birth #@+node:ekr.20031218072017.3606: *4* u.__init__ & reloadSettings def __init__(self, c): self.c = c self.debug_Undoer = False # True: enable debugging code in new undo scheme. self.debug_print = False # True: enable print statements in debug code. self.granularity = None # Set in reloadSettings. self.max_undo_stack_size = c.config.getInt('max_undo_stack_size') or 0 # Statistics comparing old and new ways (only if self.debug_Undoer is on). self.new_mem = 0 self.old_mem = 0 # State ivars... 
self.beads = [] # List of undo nodes. self.bead = -1 # Index of the present bead: -1:len(beads) self.undoType = "Can't Undo" # These must be set here, _not_ in clearUndoState. self.redoMenuLabel = "Can't Redo" self.undoMenuLabel = "Can't Undo" self.realRedoMenuLabel = "Can't Redo" self.realUndoMenuLabel = "Can't Undo" self.undoing = False # True if executing an Undo command. self.redoing = False # True if executing a Redo command. self.per_node_undo = False # True: v may contain undo_info ivar. # New in 4.2... self.optionalIvars = [] # Set the following ivars to keep pylint happy. self.afterTree = None self.beforeTree = None self.children = None self.deleteMarkedNodesData = None self.dirtyVnodeList = None self.followingSibs = None self.inHead = None self.kind = None self.newBack = None self.newBody = None self.newChanged = None self.newChildren = None self.newHead = None self.newMarked = None self.newN = None self.newP = None self.newParent = None self.newParent_v = None self.newRecentFiles = None self.newSel = None self.newTree = None self.newYScroll = None self.oldBack = None self.oldBody = None self.oldChanged = None self.oldChildren = None self.oldHead = None self.oldMarked = None self.oldN = None self.oldParent = None self.oldParent_v = None self.oldRecentFiles = None self.oldSel = None self.oldTree = None self.oldYScroll = None self.pasteAsClone = None self.prevSel = None self.sortChildren = None self.verboseUndoGroup = None self.reloadSettings() def reloadSettings(self): '''Undoer.reloadSettings.''' c = self.c self.granularity = c.config.getString('undo_granularity') if self.granularity: self.granularity = self.granularity.lower() if self.granularity not in ('node', 'line', 'word', 'char'): self.granularity = 'line' def redoHelper(self): pass def undoHelper(self): pass #@+node:ekr.20150509193222.1: *4* u.cmd (decorator) def cmd(name): '''Command decorator for the Undoer class.''' # pylint: disable=no-self-argument return g.new_cmd_decorator(name, ['c', 'undoer', ]) #@+node:ekr.20050416092908.1: *3* u.Internal helpers #@+node:ekr.20031218072017.3607: *4* u.clearOptionalIvars def clearOptionalIvars(self): u = self u.p = None # The position/node being operated upon for undo and redo. for ivar in u.optionalIvars: setattr(u, ivar, None) #@+node:ekr.20060127052111.1: *4* u.cutStack def cutStack(self): u = self; n = u.max_undo_stack_size if n > 0 and u.bead >= n and not g.app.unitTesting: # Do nothing if we are in the middle of creating a group. i = len(u.beads) - 1 while i >= 0: bunch = u.beads[i] if hasattr(bunch, 'kind') and bunch.kind == 'beforeGroup': return i -= 1 # This work regardless of how many items appear after bead n. 
# g.trace('Cutting undo stack to %d entries' % (n)) u.beads = u.beads[-n:] u.bead = n - 1 #@+node:ekr.20080623083646.10: *4* u.dumpBead def dumpBead(self, n): u = self if n < 0 or n >= len(u.beads): return 'no bead: n = ', n # bunch = u.beads[n] result = [] result.append('-' * 10) result.append('len(u.beads): %s, n: %s' % (len(u.beads), n)) for ivar in ('kind', 'newP', 'newN', 'p', 'oldN', 'undoHelper'): result.append('%s = %s' % (ivar, getattr(self, ivar))) return '\n'.join(result) def dumpTopBead(self): u = self n = len(u.beads) if n > 0: return self.dumpBead(n - 1) else: return '<no top bead>' #@+node:EKR.20040526150818: *4* u.getBead def getBead(self, n): '''Set Undoer ivars from the bunch at the top of the undo stack.''' u = self if n < 0 or n >= len(u.beads): return None bunch = u.beads[n] self.setIvarsFromBunch(bunch) return bunch #@+node:EKR.20040526150818.1: *4* u.peekBead def peekBead(self, n): u = self if n < 0 or n >= len(u.beads): return None else: return u.beads[n] #@+node:ekr.20060127113243: *4* u.pushBead def pushBead(self, bunch): u = self # New in 4.4b2: Add this to the group if it is being accumulated. bunch2 = u.bead >= 0 and u.bead < len(u.beads) and u.beads[u.bead] if bunch2 and hasattr(bunch2, 'kind') and bunch2.kind == 'beforeGroup': # Just append the new bunch the group's items. bunch2.items.append(bunch) else: # Push the bunch. u.bead += 1 u.beads[u.bead:] = [bunch] # Recalculate the menu labels. u.setUndoTypes() #@+node:ekr.20050126081529: *4* u.recognizeStartOfTypingWord def recognizeStartOfTypingWord(self, old_lines, old_row, old_col, old_ch, new_lines, new_row, new_col, new_ch, prev_row, prev_col ): ''' A potentially user-modifiable method that should return True if the typing indicated by the params starts a new 'word' for the purposes of undo with 'word' granularity. u.setUndoTypingParams calls this method only when the typing could possibly continue a previous word. In other words, undo will work safely regardless of the value returned here. old_ch is the char at the given (Tk) row, col of old_lines. new_ch is the char at the given (Tk) row, col of new_lines. The present code uses only old_ch and new_ch. The other arguments are given for use by more sophisticated algorithms.''' # Start a word if new_ch begins whitespace + word new_word_started = not old_ch.isspace() and new_ch.isspace() # Start a word if the cursor has been moved since the last change moved_cursor = new_row != prev_row or new_col != prev_col + 1 return new_word_started or moved_cursor #@+node:ekr.20031218072017.3613: *4* u.redoMenuName, undoMenuName def redoMenuName(self, name): if name == "Can't Redo": return name else: return "Redo " + name def undoMenuName(self, name): if name == "Can't Undo": return name else: return "Undo " + name #@+node:ekr.20060127070008: *4* u.setIvarsFromBunch def setIvarsFromBunch(self, bunch): u = self u.clearOptionalIvars() if 0: # Debugging. g.pr('-' * 40) for key in sorted(bunch): g.trace(key, bunch.get(key)) g.pr('-' * 20) # bunch is not a dict, so bunch.keys() is required. for key in list(bunch.keys()): val = bunch.get(key) setattr(u, key, val) if key not in u.optionalIvars: u.optionalIvars.append(key) #@+node:ekr.20031218072017.3614: *4* u.setRedoType # These routines update both the ivar and the menu label. 
def setRedoType(self, theType): u = self; frame = u.c.frame if not g.isString(theType): g.trace('oops: expected string for command, got %s' % repr(theType)) g.trace(g.callers()) theType = '<unknown>' menu = frame.menu.getMenu("Edit") name = u.redoMenuName(theType) if name != u.redoMenuLabel: # Update menu using old name. realLabel = frame.menu.getRealMenuName(name) if realLabel == name: underline = -1 if g.match(name, 0, "Can't") else 0 else: underline = realLabel.find("&") realLabel = realLabel.replace("&", "") frame.menu.setMenuLabel(menu, u.realRedoMenuLabel, realLabel, underline=underline) u.redoMenuLabel = name u.realRedoMenuLabel = realLabel #@+node:ekr.20091221145433.6381: *4* u.setUndoType def setUndoType(self, theType): u = self; frame = u.c.frame if not g.isString(theType): g.trace('oops: expected string for command, got %s' % repr(theType)) g.trace(g.callers()) theType = '<unknown>' menu = frame.menu.getMenu("Edit") name = u.undoMenuName(theType) if name != u.undoMenuLabel: # Update menu using old name. realLabel = frame.menu.getRealMenuName(name) if realLabel == name: underline = -1 if g.match(name, 0, "Can't") else 0 else: underline = realLabel.find("&") realLabel = realLabel.replace("&", "") frame.menu.setMenuLabel(menu, u.realUndoMenuLabel, realLabel, underline=underline) u.undoType = theType u.undoMenuLabel = name u.realUndoMenuLabel = realLabel #@+node:ekr.20031218072017.3616: *4* u.setUndoTypes def setUndoTypes(self): u = self # Set the undo type and undo menu label. bunch = u.peekBead(u.bead) if bunch: u.setUndoType(bunch.undoType) else: u.setUndoType("Can't Undo") # Set only the redo menu label. bunch = u.peekBead(u.bead + 1) if bunch: u.setRedoType(bunch.undoType) else: u.setRedoType("Can't Redo") u.cutStack() #@+node:EKR.20040530121329: *4* u.restoreTree & helpers def restoreTree(self, treeInfo): """Use the tree info to restore all VNode data, including all links.""" u = self # This effectively relinks all vnodes. for v, vInfo, tInfo in treeInfo: u.restoreVnodeUndoInfo(vInfo) u.restoreTnodeUndoInfo(tInfo) #@+node:ekr.20050415170737.2: *5* u.restoreVnodeUndoInfo def restoreVnodeUndoInfo(self, bunch): """Restore all ivars saved in the bunch.""" v = bunch.v v.statusBits = bunch.statusBits v.children = bunch.children v.parents = bunch.parents uA = bunch.get('unknownAttributes') if uA is not None: v.unknownAttributes = uA v._p_changed = 1 #@+node:ekr.20050415170812.2: *5* u.restoreTnodeUndoInfo def restoreTnodeUndoInfo(self, bunch): v = bunch.v v.h = bunch.headString v.b = bunch.bodyString v.statusBits = bunch.statusBits uA = bunch.get('unknownAttributes') if uA is not None: v.unknownAttributes = uA v._p_changed = 1 #@+node:EKR.20040528075307: *4* u.saveTree & helpers def saveTree(self, p, treeInfo=None): """Return a list of tuples with all info needed to handle a general undo operation.""" # WARNING: read this before doing anything "clever" #@+<< about u.saveTree >> #@+node:EKR.20040530114124: *5* << about u.saveTree >> #@+at The old code made a free-standing copy of the tree using v.copy and # t.copy. This looks "elegant" and is WRONG. The problem is that it can # not handle clones properly, especially when some clones were in the # "undo" tree and some were not. Moreover, it required complex # adjustments to t.vnodeLists. # # Instead of creating new nodes, the new code creates all information # needed to properly restore the vnodes and tnodes. It creates a list of # tuples, on tuple for each VNode in the tree. 
Each tuple has the form, # # (vnodeInfo, tnodeInfo) # # where vnodeInfo and tnodeInfo are dicts contain all info needed to # recreate the nodes. The v.createUndoInfoDict and t.createUndoInfoDict # methods correspond to the old v.copy and t.copy methods. # # Aside: Prior to 4.2 Leo used a scheme that was equivalent to the # createUndoInfoDict info, but quite a bit uglier. #@-<< about u.saveTree >> u = self; topLevel = (treeInfo is None) if topLevel: treeInfo = [] # Add info for p.v. Duplicate tnode info is harmless. data = (p.v, u.createVnodeUndoInfo(p.v), u.createTnodeUndoInfo(p.v)) treeInfo.append(data) # Recursively add info for the subtree. child = p.firstChild() while child: self.saveTree(child, treeInfo) child = child.next() return treeInfo #@+node:ekr.20050415170737.1: *5* u.createVnodeUndoInfo def createVnodeUndoInfo(self, v): """Create a bunch containing all info needed to recreate a VNode for undo.""" bunch = g.Bunch( v=v, statusBits=v.statusBits, parents=v.parents[:], children=v.children[:], ) if hasattr(v, 'unknownAttributes'): bunch.unknownAttributes = v.unknownAttributes return bunch #@+node:ekr.20050415170812.1: *5* u.createTnodeUndoInfo def createTnodeUndoInfo(self, v): """Create a bunch containing all info needed to recreate a VNode.""" bunch = g.Bunch( v=v, headString=v.h, bodyString=v.b, statusBits=v.statusBits, ) if hasattr(v, 'unknownAttributes'): bunch.unknownAttributes = v.unknownAttributes return bunch #@+node:ekr.20050525151449: *4* u.trace def trace(self): ivars = ('kind', 'undoType') for ivar in ivars: g.pr(ivar, getattr(self, ivar)) #@+node:ekr.20050410095424: *4* u.updateMarks def updateMarks(self, oldOrNew): '''Update dirty and marked bits.''' u = self; c = u.c if oldOrNew not in ('new', 'old'): g.trace("can't happen") return isOld = oldOrNew == 'old' marked = u.oldMarked if isOld else u.newMarked if marked: c.setMarked(u.p) else: c.clearMarked(u.p) # Bug fix: Leo 4.4.6: Undo/redo always set changed/dirty bits # because the file may have been saved. u.p.setDirty(setDescendentsDirty=False) u.p.setAllAncestorAtFileNodesDirty(setDescendentsDirty=False) # Bug fix: Leo 4.4.6 u.c.setChanged(True) #@+node:ekr.20031218072017.3608: *3* u.Externally visible entries #@+node:ekr.20050318085432.4: *4* u.afterX... #@+node:ekr.20050315134017.4: *5* u.afterChangeGroup def afterChangeGroup(self, p, undoType, reportFlag=False, dirtyVnodeList=None): '''Create an undo node for general tree operations using d created by beforeChangeGroup''' u = self; c = self.c w = c.frame.body.wrapper if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] bunch = u.beads[u.bead] if not u.beads: g.trace('oops: empty undo stack.') return if bunch.kind == 'beforeGroup': bunch.kind = 'afterGroup' else: g.trace('oops: expecting beforeGroup, got %s' % bunch.kind) # Set the types & helpers. bunch.kind = 'afterGroup' bunch.undoType = undoType # Set helper only for undo: # The bead pointer will point to an 'beforeGroup' bead for redo. bunch.undoHelper = u.undoGroup bunch.redoHelper = u.redoGroup bunch.dirtyVnodeList = dirtyVnodeList bunch.newP = p.copy() bunch.newSel = w.getSelectionRange() # Tells whether to report the number of separate changes undone/redone. bunch.reportFlag = reportFlag if 0: # Push the bunch. u.bead += 1 u.beads[u.bead:] = [bunch] # Recalculate the menu labels. 
u.setUndoTypes() #@+node:ekr.20050315134017.2: *5* u.afterChangeNodeContents def afterChangeNodeContents(self, p, command, bunch, dirtyVnodeList=None, inHead=False): '''Create an undo node using d created by beforeChangeNode.''' u = self; c = self.c; w = c.frame.body.wrapper if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] # Set the type & helpers. bunch.kind = 'node' bunch.undoType = command bunch.undoHelper = u.undoNodeContents bunch.redoHelper = u.redoNodeContents bunch.dirtyVnodeList = dirtyVnodeList bunch.inHead = inHead # 2013/08/26 bunch.newBody = p.b bunch.newChanged = u.c.isChanged() bunch.newDirty = p.isDirty() bunch.newHead = p.h bunch.newMarked = p.isMarked() # Bug fix 2017/11/12: don't use ternary operator. if w: bunch.newSel = w.getSelectionRange() else: bunch.newSel = 0, 0 bunch.newYScroll = w.getYScrollPosition() if w else 0 u.pushBead(bunch) #@+node:ekr.20050315134017.3: *5* u.afterChangeTree def afterChangeTree(self, p, command, bunch): '''Create an undo node for general tree operations using d created by beforeChangeTree''' u = self; c = self.c; w = c.frame.body.wrapper if u.redoing or u.undoing: return # Set the types & helpers. bunch.kind = 'tree' bunch.undoType = command bunch.undoHelper = u.undoTree bunch.redoHelper = u.redoTree # Set by beforeChangeTree: changed, oldSel, oldText, oldTree, p bunch.newSel = w.getSelectionRange() bunch.newText = w.getAllText() bunch.newTree = u.saveTree(p) u.pushBead(bunch) #@+node:ekr.20050424161505: *5* u.afterClearRecentFiles def afterClearRecentFiles(self, bunch): u = self bunch.newRecentFiles = g.app.config.recentFiles[:] bunch.undoType = 'Clear Recent Files' bunch.undoHelper = u.undoClearRecentFiles bunch.redoHelper = u.redoClearRecentFiles u.pushBead(bunch) return bunch #@+node:ekr.20111006060936.15639: *5* u.afterCloneMarkedNodes def afterCloneMarkedNodes(self, p): u = self; c = u.c if u.redoing or u.undoing: return bunch = u.createCommonBunch(p) # Sets # oldChanged = c.isChanged(), # oldDirty = p.isDirty(), # oldMarked = p.isMarked(), # oldSel = w and w.getSelectionRange() or None, # p = p.copy(), # Set types & helpers bunch.kind = 'clone-marked-nodes' bunch.undoType = 'clone-marked-nodes' # Set helpers bunch.undoHelper = u.undoCloneMarkedNodes bunch.redoHelper = u.redoCloneMarkedNodes bunch.newP = p.next() bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() u.pushBead(bunch) #@+node:ekr.20160502175451.1: *5* u.afterCopyMarkedNodes def afterCopyMarkedNodes(self, p): u = self; c = u.c if u.redoing or u.undoing: return bunch = u.createCommonBunch(p) # Sets # oldChanged = c.isChanged(), # oldDirty = p.isDirty(), # oldMarked = p.isMarked(), # oldSel = w and w.getSelectionRange() or None, # p = p.copy(), # Set types & helpers bunch.kind = 'copy-marked-nodes' bunch.undoType = 'copy-marked-nodes' # Set helpers bunch.undoHelper = u.undoCopyMarkedNodes bunch.redoHelper = u.redoCopyMarkedNodes bunch.newP = p.next() bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() u.pushBead(bunch) #@+node:ekr.20050411193627.5: *5* u.afterCloneNode def afterCloneNode(self, p, command, bunch, dirtyVnodeList=None): u = self; c = u.c if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] # Set types & helpers bunch.kind = 'clone' bunch.undoType = command # Set helpers bunch.undoHelper = u.undoCloneNode bunch.redoHelper = u.redoCloneNode bunch.newBack = p.back() # 6/15/05 bunch.newParent = p.parent() # 6/15/05 
bunch.newP = p.copy() bunch.dirtyVnodeList = dirtyVnodeList bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() u.pushBead(bunch) #@+node:ekr.20050411193627.6: *5* u.afterDehoist def afterDehoist(self, p, command): u = self if u.redoing or u.undoing: return bunch = u.createCommonBunch(p) # Set types & helpers bunch.kind = 'dehoist' bunch.undoType = command # Set helpers bunch.undoHelper = u.undoDehoistNode bunch.redoHelper = u.redoDehoistNode u.pushBead(bunch) #@+node:ekr.20050411193627.8: *5* u.afterDeleteNode def afterDeleteNode(self, p, command, bunch, dirtyVnodeList=None): u = self; c = u.c if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] # Set types & helpers bunch.kind = 'delete' bunch.undoType = command # Set helpers bunch.undoHelper = u.undoDeleteNode bunch.redoHelper = u.redoDeleteNode bunch.newP = p.copy() bunch.dirtyVnodeList = dirtyVnodeList bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() u.pushBead(bunch) #@+node:ekr.20111005152227.15555: *5* u.afterDeleteMarkedNodes def afterDeleteMarkedNodes(self, data, p): u = self; c = u.c if u.redoing or u.undoing: return bunch = u.createCommonBunch(p) # Set types & helpers bunch.kind = 'delete-marked-nodes' bunch.undoType = 'delete-marked-nodes' # Set helpers bunch.undoHelper = u.undoDeleteMarkedNodes bunch.redoHelper = u.redoDeleteMarkedNodes bunch.newP = p.copy() bunch.deleteMarkedNodesData = data # bunch.dirtyVnodeList = dirtyVnodeList bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() u.pushBead(bunch) #@+node:ekr.20080425060424.8: *5* u.afterDemote def afterDemote(self, p, followingSibs, dirtyVnodeList): '''Create an undo node for demote operations.''' u = self bunch = u.createCommonBunch(p) # Set types. bunch.kind = 'demote' bunch.undoType = 'Demote' bunch.undoHelper = u.undoDemote bunch.redoHelper = u.redoDemote bunch.followingSibs = followingSibs # Push the bunch. u.bead += 1 u.beads[u.bead:] = [bunch] # Recalculate the menu labels. u.setUndoTypes() #@+node:ekr.20050411193627.7: *5* u.afterHoist def afterHoist(self, p, command): u = self if u.redoing or u.undoing: return bunch = u.createCommonBunch(p) # Set types & helpers bunch.kind = 'hoist' bunch.undoType = command # Set helpers bunch.undoHelper = u.undoHoistNode bunch.redoHelper = u.redoHoistNode u.pushBead(bunch) #@+node:ekr.20050411193627.9: *5* u.afterInsertNode def afterInsertNode(self, p, command, bunch, dirtyVnodeList=None): u = self; c = u.c if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] # Set types & helpers bunch.kind = 'insert' bunch.undoType = command # Set helpers bunch.undoHelper = u.undoInsertNode bunch.redoHelper = u.redoInsertNode bunch.newP = p.copy() bunch.dirtyVnodeList = dirtyVnodeList bunch.newBack = p.back() bunch.newParent = p.parent() bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() if bunch.pasteAsClone: beforeTree = bunch.beforeTree afterTree = [] for bunch2 in beforeTree: v = bunch2.v afterTree.append( g.Bunch(v=v, head=v.h[:], body=v.b[:])) bunch.afterTree = afterTree u.pushBead(bunch) #@+node:ekr.20050526124257: *5* u.afterMark def afterMark(self, p, command, bunch, dirtyVnodeList=None): '''Create an undo node for mark and unmark commands.''' # 'command' unused, but present for compatibility with similar methods. 
u = self if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] # Set the type & helpers. bunch.undoHelper = u.undoMark bunch.redoHelper = u.redoMark bunch.dirtyVnodeList = dirtyVnodeList bunch.newChanged = u.c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() u.pushBead(bunch) #@+node:ekr.20050410110343: *5* u.afterMoveNode def afterMoveNode(self, p, command, bunch, dirtyVnodeList=None): u = self; c = u.c if u.redoing or u.undoing: return if dirtyVnodeList is None: dirtyVnodeList = [] # Set the types & helpers. bunch.kind = 'move' bunch.undoType = command # Set helper only for undo: # The bead pointer will point to an 'beforeGroup' bead for redo. bunch.undoHelper = u.undoMove bunch.redoHelper = u.redoMove bunch.dirtyVnodeList = dirtyVnodeList bunch.newChanged = c.isChanged() bunch.newDirty = p.isDirty() bunch.newMarked = p.isMarked() bunch.newN = p.childIndex() bunch.newParent_v = p._parentVnode() bunch.newP = p.copy() u.pushBead(bunch) #@+node:ekr.20080425060424.12: *5* u.afterPromote def afterPromote(self, p, children, dirtyVnodeList): '''Create an undo node for demote operations.''' u = self bunch = u.createCommonBunch(p) # Set types. bunch.kind = 'promote' bunch.undoType = 'Promote' bunch.undoHelper = u.undoPromote bunch.redoHelper = u.redoPromote bunch.children = children # Push the bunch. u.bead += 1 u.beads[u.bead:] = [bunch] # Recalculate the menu labels. u.setUndoTypes() #@+node:ekr.20080425060424.2: *5* u.afterSort def afterSort(self, p, bunch, dirtyVnodeList): '''Create an undo node for sort operations''' u = self # c = self.c if u.redoing or u.undoing: return bunch.dirtyVnodeList = dirtyVnodeList # Recalculate the menu labels. u.setUndoTypes() #@+node:ekr.20050318085432.3: *4* u.beforeX... #@+node:ekr.20050315134017.7: *5* u.beforeChangeGroup def beforeChangeGroup(self, p, command, verboseUndoGroup=True): '''Prepare to undo a group of undoable operations.''' u = self bunch = u.createCommonBunch(p) # Set types. bunch.kind = 'beforeGroup' bunch.undoType = command bunch.verboseUndoGroup = verboseUndoGroup # Set helper only for redo: # The bead pointer will point to an 'afterGroup' bead for undo. bunch.undoHelper = u.undoGroup bunch.redoHelper = u.redoGroup bunch.items = [] # Push the bunch. 
u.bead += 1 u.beads[u.bead:] = [bunch] #@+node:ekr.20050315133212.2: *5* u.beforeChangeNodeContents def beforeChangeNodeContents(self, p, oldBody=None, oldHead=None, oldYScroll=None): '''Return data that gets passed to afterChangeNode''' u = self bunch = u.createCommonBunch(p) bunch.oldBody = oldBody or p.b bunch.oldHead = oldHead or p.h bunch.oldYScroll = oldYScroll return bunch #@+node:ekr.20050315134017.6: *5* u.beforeChangeTree def beforeChangeTree(self, p): u = self; c = u.c w = c.frame.body.wrapper bunch = u.createCommonBunch(p) bunch.oldSel = w.getSelectionRange() bunch.oldText = w.getAllText() bunch.oldTree = u.saveTree(p) return bunch #@+node:ekr.20050424161505.1: *5* u.beforeClearRecentFiles def beforeClearRecentFiles(self): u = self; p = u.c.p bunch = u.createCommonBunch(p) bunch.oldRecentFiles = g.app.config.recentFiles[:] return bunch #@+node:ekr.20050412080354: *5* u.beforeCloneNode def beforeCloneNode(self, p): u = self bunch = u.createCommonBunch(p) return bunch #@+node:ekr.20050411193627.3: *5* u.beforeDeleteNode def beforeDeleteNode(self, p): u = self bunch = u.createCommonBunch(p) bunch.oldBack = p.back() bunch.oldParent = p.parent() return bunch #@+node:ekr.20050411193627.4: *5* u.beforeInsertNode def beforeInsertNode(self, p, pasteAsClone=False, copiedBunchList=None): u = self if copiedBunchList is None: copiedBunchList = [] bunch = u.createCommonBunch(p) bunch.pasteAsClone = pasteAsClone if pasteAsClone: # Save the list of bunched. bunch.beforeTree = copiedBunchList return bunch #@+node:ekr.20050526131252: *5* u.beforeMark def beforeMark(self, p, command): u = self bunch = u.createCommonBunch(p) bunch.kind = 'mark' bunch.undoType = command return bunch #@+node:ekr.20050410110215: *5* u.beforeMoveNode def beforeMoveNode(self, p): u = self bunch = u.createCommonBunch(p) bunch.oldN = p.childIndex() bunch.oldParent_v = p._parentVnode() return bunch #@+node:ekr.20080425060424.3: *5* u.beforeSort def beforeSort(self, p, undoType, oldChildren, newChildren, sortChildren): '''Create an undo node for sort operations.''' u = self bunch = u.createCommonBunch(p) # Set types. bunch.kind = 'sort' bunch.undoType = undoType bunch.undoHelper = u.undoSort bunch.redoHelper = u.redoSort bunch.oldChildren = oldChildren bunch.newChildren = newChildren bunch.sortChildren = sortChildren # A bool # Push the bunch. u.bead += 1 u.beads[u.bead:] = [bunch] return bunch #@+node:ekr.20050318085432.2: *5* u.createCommonBunch def createCommonBunch(self, p): '''Return a bunch containing all common undo info. This is mostly the info for recreating an empty node at position p.''' u = self; c = u.c; w = c.frame.body.wrapper return g.Bunch( oldChanged=c.isChanged(), oldDirty=p and p.isDirty(), oldMarked=p and p.isMarked(), oldSel=w and w.getSelectionRange() or None, p=p and p.copy(), ) #@+node:ekr.20031218072017.3610: *4* u.canRedo & canUndo # Translation does not affect these routines. def canRedo(self): u = self return u.redoMenuLabel != "Can't Redo" def canUndo(self): u = self return u.undoMenuLabel != "Can't Undo" #@+node:ekr.20031218072017.3609: *4* u.clearUndoState def clearUndoState(self): """Clears then entire Undo state. All non-undoable commands should call this method.""" u = self u.clearOptionalIvars() # Do this first. u.setRedoType("Can't Redo") u.setUndoType("Can't Undo") u.beads = [] # List of undo nodes. 
u.bead = -1 # Index of the present bead: -1:len(beads) #@+node:ekr.20031218072017.3611: *4* u.enableMenuItems def enableMenuItems(self): u = self; frame = u.c.frame menu = frame.menu.getMenu("Edit") if menu: frame.menu.enableMenu(menu, u.redoMenuLabel, u.canRedo()) frame.menu.enableMenu(menu, u.undoMenuLabel, u.canUndo()) #@+node:ekr.20110519074734.6094: *4* u.onSelect & helpers def onSelect(self, old_p, p): u = self if u.per_node_undo: if old_p and u.beads: u.putIvarsToVnode(old_p) u.setIvarsFromVnode(p) u.setUndoTypes() #@+node:ekr.20110519074734.6096: *5* u.putIvarsToVnode def putIvarsToVnode(self, p): u = self; v = p.v assert self.per_node_undo bunch = g.bunch() for key in self.optionalIvars: bunch[key] = getattr(u, key) # Put these ivars by hand. for key in ('bead', 'beads', 'undoType',): bunch[key] = getattr(u, key) v.undo_info = bunch #@+node:ekr.20110519074734.6095: *5* u.setIvarsFromVnode def setIvarsFromVnode(self, p): u = self; v = p.v assert self.per_node_undo u.clearUndoState() if hasattr(v, 'undo_info'): u.setIvarsFromBunch(v.undo_info) #@+node:ekr.20031218072017.1490: *4* u.setUndoTypingParams def setUndoTypingParams(self, p, undo_type, oldText, newText, oldSel=None, newSel=None, oldYview=None, ): ''' Save enough information to undo or redo typing operation. Do nothing when called from the undo/redo logic because the Undo and Redo commands merely reset the bead pointer. ''' u = self; c = u.c #@+<< return if there is nothing to do >> #@+node:ekr.20040324061854: *5* << return if there is nothing to do >> if u.redoing or u.undoing: return None if undo_type is None: return None if undo_type == "Can't Undo": u.clearUndoState() u.setUndoTypes() # Must still recalculate the menu labels. return None if oldText == newText: u.setUndoTypes() # Must still recalculate the menu labels. return None #@-<< return if there is nothing to do >> #@+<< init the undo params >> #@+node:ekr.20040324061854.1: *5* << init the undo params >> # Clear all optional params. # for ivar in u.optionalIvars: # setattr(u,ivar,None) u.clearOptionalIvars() # Set the params. u.undoType = undo_type u.p = p.copy() #@-<< init the undo params >> #@+<< compute leading, middle & trailing lines >> #@+node:ekr.20031218072017.1491: *5* << compute leading, middle & trailing lines >> #@+at Incremental undo typing is similar to incremental syntax coloring. We compute # the number of leading and trailing lines that match, and save both the old and # new middle lines. NB: the number of old and new middle lines may be different. #@@c old_lines = oldText.split('\n') new_lines = newText.split('\n') new_len = len(new_lines) old_len = len(old_lines) min_len = min(old_len, new_len) i = 0 while i < min_len: if old_lines[i] != new_lines[i]: break i += 1 leading = i if leading == new_len: # This happens when we remove lines from the end. # The new text is simply the leading lines from the old text. trailing = 0 else: i = 0 while i < min_len - leading: if old_lines[old_len - i - 1] != new_lines[new_len - i - 1]: break i += 1 trailing = i # NB: the number of old and new middle lines may be different. if trailing == 0: old_middle_lines = old_lines[leading:] new_middle_lines = new_lines[leading:] else: old_middle_lines = old_lines[leading: -trailing] new_middle_lines = new_lines[leading: -trailing] # Remember how many trailing newlines in the old and new text. 
i = len(oldText) - 1; old_newlines = 0 while i >= 0 and oldText[i] == '\n': old_newlines += 1 i -= 1 i = len(newText) - 1; new_newlines = 0 while i >= 0 and newText[i] == '\n': new_newlines += 1 i -= 1 #@-<< compute leading, middle & trailing lines >> #@+<< save undo text info >> #@+node:ekr.20031218072017.1492: *5* << save undo text info >> #@+at This is the start of the incremental undo algorithm. # # We must save enough info to do _both_ of the following: # # Undo: Given newText, recreate oldText. # Redo: Given oldText, recreate oldText. # # The "given" texts for the undo and redo routines are simply p.b. #@@c if u.debug_Undoer: # Remember the complete text for comparisons... u.oldText = oldText u.newText = newText # Compute statistics comparing old and new ways... # The old doesn't often store the old text, so don't count it here. u.old_mem += len(newText) s1 = '\n'.join(old_middle_lines) s2 = '\n'.join(new_middle_lines) u.new_mem += len(s1) + len(s2) else: u.oldText = None u.newText = None u.leading = leading u.trailing = trailing u.oldMiddleLines = old_middle_lines u.newMiddleLines = new_middle_lines u.oldNewlines = old_newlines u.newNewlines = new_newlines #@-<< save undo text info >> #@+<< save the selection and scrolling position >> #@+node:ekr.20040324061854.2: *5* << save the selection and scrolling position >> # Remember the selection. u.oldSel = oldSel u.newSel = newSel # Remember the scrolling position. if oldYview: u.yview = oldYview else: u.yview = c.frame.body.wrapper.getYScrollPosition() #@-<< save the selection and scrolling position >> #@+<< adjust the undo stack, clearing all forward entries >> #@+node:ekr.20040324061854.3: *5* << adjust the undo stack, clearing all forward entries >> #@+at New in Leo 4.3. Instead of creating a new bead on every character, we # may adjust the top bead: # # word granularity: adjust the top bead if the typing would continue the word. # line granularity: adjust the top bead if the typing is on the same line. # node granularity: adjust the top bead if the typing is anywhere on the same node. #@@c granularity = u.granularity old_d = u.peekBead(u.bead) old_p = old_d and old_d.get('p') #@+<< set newBead if we can't share the previous bead >> #@+node:ekr.20050125220613: *6* << set newBead if we can't share the previous bead >> #@+at We must set newBead to True if undo_type is not 'Typing' so that commands that # get treated like typing (by updateBodyPane and onBodyChanged) don't get lumped # with 'real' typing. #@@c if ( not old_d or not old_p or old_p.v != p.v or old_d.get('kind') != 'typing' or old_d.get('undoType') != 'Typing' or undo_type != 'Typing' ): newBead = True # We can't share the previous node. elif granularity == 'char': newBead = True # This was the old way. elif granularity == 'node': newBead = False # Always replace previous bead. else: assert granularity in ('line', 'word') # Replace the previous bead if only the middle lines have changed. newBead = ( old_d.get('leading', 0) != u.leading or old_d.get('trailing', 0) != u.trailing ) if granularity == 'word' and not newBead: # Protect the method that may be changed by the user try: #@+<< set newBead if the change does not continue a word >> #@+node:ekr.20050125203937: *7* << set newBead if the change does not continue a word >> # Fix #653: undoer problem: be wary of the ternary operator here. 
old_start = old_end = new_start = new_end = 0 if oldSel: old_start, old_end = oldSel if newSel: new_start, new_end = newSel prev_start, prev_end = u.prevSel if old_start != old_end or new_start != new_end: # The new and old characters are not contiguous. newBead = True else: # 2011/04/01: Patch by Sam Hartsfield old_row, old_col = g.convertPythonIndexToRowCol(oldText, old_start) new_row, new_col = g.convertPythonIndexToRowCol(newText, new_start) prev_row, prev_col = g.convertPythonIndexToRowCol(oldText, prev_start) old_lines = g.splitLines(oldText) new_lines = g.splitLines(newText) # Recognize backspace, del, etc. as contiguous. if old_row != new_row or abs(old_col - new_col) != 1: # The new and old characters are not contiguous. newBead = True elif old_col == 0 or new_col == 0: # py-lint: disable=W0511 # W0511:1362: TODO # TODO this is not true, we might as well just have entered a # char at the beginning of an existing line pass # We have just inserted a line. else: # 2011/04/01: Patch by Sam Hartsfield old_s = old_lines[old_row] new_s = new_lines[new_row] # New in 4.3b2: # Guard against invalid oldSel or newSel params. if old_col - 1 >= len(old_s) or new_col - 1 >= len(new_s): newBead = True else: old_ch = old_s[old_col - 1] new_ch = new_s[new_col - 1] newBead = self.recognizeStartOfTypingWord( old_lines, old_row, old_col, old_ch, new_lines, new_row, new_col, new_ch, prev_row, prev_col) #@-<< set newBead if the change does not continue a word >> except Exception: g.error('Unexpected exception...') g.es_exception() newBead = True #@-<< set newBead if we can't share the previous bead >> # Save end selection as new "previous" selection u.prevSel = u.newSel if newBead: # Push params on undo stack, clearing all forward entries. bunch = g.Bunch( p=p.copy(), kind='typing', undoType=undo_type, undoHelper=u.undoTyping, redoHelper=u.redoTyping, oldText=u.oldText, oldSel=u.oldSel, oldNewlines=u.oldNewlines, oldMiddleLines=u.oldMiddleLines, ) u.pushBead(bunch) else: bunch = old_d bunch.dirtyVnodeList = p.setAllAncestorAtFileNodesDirty() # Bug fix: Leo 4.4.6: always add p to the list. bunch.dirtyVnodeList.append(p.copy()) bunch.leading = u.leading bunch.trailing = u.trailing bunch.newNewlines = u.newNewlines bunch.newMiddleLines = u.newMiddleLines bunch.newSel = u.newSel bunch.newText = u.newText bunch.yview = u.yview #@-<< adjust the undo stack, clearing all forward entries >> if u.per_node_undo: u.putIvarsToVnode(p) return bunch # Never used. #@+node:ekr.20031218072017.2030: *3* u.redo @cmd('redo') def redo(self, event=None): '''Redo the operation undone by the last undo.''' u = self; c = u.c w = c.frame.body.wrapper if not c.p: return # End editing *before* getting state. c.endEditing() if not u.canRedo(): return if not u.getBead(u.bead + 1): return u.redoing = True u.groupCount = 0 if u.redoHelper: u.redoHelper() else: g.trace('no redo helper for %s %s' % (u.kind, u.undoType)) c.checkOutline() # Redraw and recolor. c.frame.body.updateEditors() # New in Leo 4.4.8. if 0: # Don't do this: it interferes with selection ranges. # This strange code forces a recomputation of the root position. c.selectPosition(c.p) else: c.setCurrentPosition(c.p) if u.newChanged is None: u.newChanged = True c.setChanged(u.newChanged) # Redrawing *must* be done here before setting u.undoing to False. i, j = w.getSelectionRange() ins = w.getInsertPoint() c.redraw() c.recolor() if u.inHead: # 2013/08/26. 
c.editHeadline() u.inHead = False else: c.bodyWantsFocus() w.setSelectionRange(i, j, insert=ins) w.seeInsertPoint() u.redoing = False u.bead += 1 u.setUndoTypes() #@+node:ekr.20110519074734.6092: *3* u.redo helpers #@+node:ekr.20050424170219: *4* u.redoClearRecentFiles def redoClearRecentFiles(self): u = self; c = u.c rf = g.app.recentFilesManager rf.setRecentFiles(u.newRecentFiles[:]) rf.createRecentFilesMenuItems(c) #@+node:ekr.20111005152227.15558: *4* u.redoCloneMarkedNodes def redoCloneMarkedNodes(self): u = self; c = u.c c.selectPosition(u.p) c.cloneMarked() u.newP = c.p u.newChanged = c.isChanged() #@+node:ekr.20160502175557.1: *4* u.redoCopyMarkedNodes def redoCopyMarkedNodes(self): u = self; c = u.c c.selectPosition(u.p) c.copyMarked() u.newP = c.p u.newChanged = c.isChanged() #@+node:ekr.20050412083057: *4* u.redoCloneNode def redoCloneNode(self): u = self; c = u.c; cc = c.chapterController if cc: cc.selectChapterByName('main') if u.newBack: u.newP._linkAfter(u.newBack) elif u.newParent: u.newP._linkAsNthChild(u.newParent, 0) else: oldRoot = c.rootPosition() u.newP._linkAsRoot(oldRoot) for v in u.dirtyVnodeList: v.setDirty() c.selectPosition(u.newP) #@+node:ekr.20111005152227.15559: *4* u.redoDeleteMarkedNodes def redoDeleteMarkedNodes(self): u = self; c = u.c c.selectPosition(u.p) c.deleteMarked() c.selectPosition(u.newP) u.newChanged = c.isChanged() #@+node:EKR.20040526072519.2: *4* u.redoDeleteNode def redoDeleteNode(self): u = self; c = u.c c.selectPosition(u.p) c.deleteOutline() c.selectPosition(u.newP) #@+node:ekr.20080425060424.9: *4* u.redoDemote def redoDemote(self): u = self; c = u.c parent_v = u.p._parentVnode() n = u.p.childIndex() # Move the demoted nodes from the old parent to the new parent. parent_v.children = parent_v.children[: n + 1] u.p.v.children.extend(u.followingSibs) # Adjust the parent links of the moved nodes. # There is no need to adjust descendant links. for v in u.followingSibs: v.parents.remove(parent_v) v.parents.append(u.p.v) c.setCurrentPosition(u.p) #@+node:ekr.20050318085432.6: *4* u.redoGroup def redoGroup(self): '''Process beads until the matching 'afterGroup' bead is seen.''' u = self # Remember these values. c = u.c dirtyVnodeList = u.dirtyVnodeList or [] newSel = u.newSel p = u.p.copy() u.groupCount += 1 bunch = u.beads[u.bead]; count = 0 if not hasattr(bunch, 'items'): g.trace('oops: expecting bunch.items. bunch.kind = %s' % bunch.kind) g.trace(bunch) else: for z in bunch.items: self.setIvarsFromBunch(z) if z.redoHelper: z.redoHelper(); count += 1 else: g.trace('oops: no redo helper for %s %s' % (u.undoType, p.h)) u.groupCount -= 1 u.updateMarks('new') # Bug fix: Leo 4.4.6. 
for v in dirtyVnodeList: v.setDirty() if not g.unitTesting and u.verboseUndoGroup: g.es("redo", count, "instances") c.selectPosition(p) if newSel: i, j = newSel c.frame.body.wrapper.setSelectionRange(i, j) #@+node:ekr.20050412085138.1: *4* u.redoHoistNode & redoDehoistNode def redoHoistNode(self): u = self; c = u.c c.selectPosition(u.p) c.hoist() def redoDehoistNode(self): u = self; c = u.c c.selectPosition(u.p) c.dehoist() #@+node:ekr.20050412084532: *4* u.redoInsertNode def redoInsertNode(self): u = self; c = u.c; cc = c.chapterController if cc: cc.selectChapterByName('main') if u.newBack: u.newP._linkAfter(u.newBack) elif u.newParent: u.newP._linkAsNthChild(u.newParent, 0) else: oldRoot = c.rootPosition() u.newP._linkAsRoot(oldRoot) if u.pasteAsClone: for bunch in u.afterTree: v = bunch.v if u.newP.v == v: c.setBodyString(u.newP, bunch.body) c.setHeadString(u.newP, bunch.head) else: v.setBodyString(bunch.body) v.setHeadString(bunch.head) c.selectPosition(u.newP) #@+node:ekr.20050526125801: *4* u.redoMark def redoMark(self): u = self; c = u.c u.updateMarks('new') if u.groupCount == 0: for v in u.dirtyVnodeList: v.setDirty() c.selectPosition(u.p) #@+node:ekr.20050411111847: *4* u.redoMove def redoMove(self): u = self; c = u.c; cc = c.chapterController v = u.p.v assert(u.oldParent_v) assert(u.newParent_v) assert(v) if cc: cc.selectChapterByName('main') # Adjust the children arrays. assert u.oldParent_v.children[u.oldN] == v del u.oldParent_v.children[u.oldN] parent_v = u.newParent_v parent_v.children.insert(u.newN, v) v.parents.append(u.newParent_v) v.parents.remove(u.oldParent_v) u.updateMarks('new') for v in u.dirtyVnodeList: v.setDirty() c.selectPosition(u.newP) #@+node:ekr.20050318085432.7: *4* u.redoNodeContents def redoNodeContents(self): u = self; c = u.c; w = c.frame.body.wrapper # Restore the body. u.p.setBodyString(u.newBody) w.setAllText(u.newBody) c.frame.body.recolor(u.p) # Restore the headline. u.p.initHeadString(u.newHead) # This is required so. Otherwise redraw will revert the change! c.frame.tree.setHeadline(u.p, u.newHead) # New in 4.4b2. if u.groupCount == 0 and u.newSel: i, j = u.newSel w.setSelectionRange(i, j) if u.groupCount == 0 and u.newYScroll is not None: w.setYScrollPosition(u.newYScroll) u.updateMarks('new') for v in u.dirtyVnodeList: v.setDirty() #@+node:ekr.20080425060424.13: *4* u.redoPromote def redoPromote(self): u = self; c = u.c parent_v = u.p._parentVnode() # Add the children to parent_v's children. n = u.p.childIndex() + 1 old_children = parent_v.children[:] parent_v.children = old_children[: n] # Add children up to the promoted nodes. parent_v.children.extend(u.children) # Add the promoted nodes. parent_v.children.extend(old_children[n:]) # Add the children up to the promoted nodes. # Remove the old children. u.p.v.children = [] # Adjust the parent links in the moved children. # There is no need to adjust descendant links. for child in u.children: child.parents.remove(u.p.v) child.parents.append(parent_v) c.setCurrentPosition(u.p) #@+node:ekr.20080425060424.4: *4* u.redoSort def redoSort(self): u = self; c = u.c parent_v = u.p._parentVnode() parent_v.children = u.newChildren p = c.setPositionAfterSort(u.sortChildren) c.setCurrentPosition(p) #@+node:ekr.20050318085432.8: *4* u.redoTree def redoTree(self): '''Redo replacement of an entire tree.''' u = self; c = u.c u.p = self.undoRedoTree(u.p, u.oldTree, u.newTree) c.selectPosition(u.p) # Does full recolor. 
if u.newSel: i, j = u.newSel c.frame.body.wrapper.setSelectionRange(i, j) #@+node:EKR.20040526075238.5: *4* u.redoTyping def redoTyping(self): u = self; c = u.c; current = c.p w = c.frame.body.wrapper # selectPosition causes recoloring, so avoid if possible. if current != u.p: c.selectPosition(u.p) self.undoRedoText( u.p, u.leading, u.trailing, u.newMiddleLines, u.oldMiddleLines, u.newNewlines, u.oldNewlines, tag="redo", undoType=u.undoType) u.updateMarks('new') for v in u.dirtyVnodeList: v.setDirty() if u.newSel: c.bodyWantsFocus() i, j = u.newSel w.setSelectionRange(i, j, insert=j) if u.yview: c.bodyWantsFocus() w.setYScrollPosition(u.yview) #@+node:ekr.20031218072017.2039: *3* u.undo @cmd('undo') def undo(self, event=None): """Undo the operation described by the undo parameters.""" u = self; c = u.c w = c.frame.body.wrapper if not c.p: return g.trace('no current position') # End editing *before* getting state. c.endEditing() if u.per_node_undo: # 2011/05/19 u.setIvarsFromVnode(c.p) if not u.canUndo(): return if not u.getBead(u.bead): return u.undoing = True u.groupCount = 0 if u.undoHelper: u.undoHelper() else: g.trace('no undo helper for %s %s' % (u.kind, u.undoType)) c.checkOutline() # Redraw and recolor. c.frame.body.updateEditors() # New in Leo 4.4.8. if 0: # Don't do this: it interferes with selection ranges. # This strange code forces a recomputation of the root position. c.selectPosition(c.p) else: c.setCurrentPosition(c.p) if u.oldChanged is None: u.oldChanged = True c.setChanged(u.oldChanged) # Redrawing *must* be done here before setting u.undoing to False. i, j = w.getSelectionRange() ins = w.getInsertPoint() c.redraw() c.recolor() if u.inHead: c.editHeadline() u.inHead = False else: c.bodyWantsFocus() w.setSelectionRange(i, j, insert=ins) w.seeInsertPoint() u.undoing = False u.bead -= 1 u.setUndoTypes() #@+node:ekr.20110519074734.6093: *3* u.undo helpers #@+node:ekr.20050424170219.1: *4* u.undoClearRecentFiles def undoClearRecentFiles(self): u = self; c = u.c rf = g.app.recentFilesManager rf.setRecentFiles(u.oldRecentFiles[:]) rf.createRecentFilesMenuItems(c) #@+node:ekr.20111005152227.15560: *4* u.undoCloneMarkedNodes def undoCloneMarkedNodes(self): u = self next = u.p.next() assert next.h == 'Clones of marked nodes', (u.p, next.h) next.doDelete() u.p.setAllAncestorAtFileNodesDirty() u.c.selectPosition(u.p) #@+node:ekr.20160502175653.1: *4* u.undoCopyMarkedNodes def undoCopyMarkedNodes(self): u = self next = u.p.next() assert next.h == 'Copies of marked nodes', (u.p.h, next.h) next.doDelete() u.p.setAllAncestorAtFileNodesDirty() u.c.selectPosition(u.p) #@+node:ekr.20050412083057.1: *4* u.undoCloneNode def undoCloneNode(self): u = self; c = u.c; cc = c.chapterController if cc: cc.selectChapterByName('main') c.selectPosition(u.newP) c.deleteOutline() for v in u.dirtyVnodeList: v.setDirty() # Bug fix: Leo 4.4.6 c.selectPosition(u.p) #@+node:ekr.20111005152227.15557: *4* u.undoDeleteMarkedNodes def undoDeleteMarkedNodes(self): u = self; c = u.c # Undo the deletes in reverse order aList = u.deleteMarkedNodesData[:] aList.reverse() for p in aList: if p.stack: parent_v, junk = p.stack[-1] else: parent_v = c.hiddenRootNode p.v._addLink(p._childIndex, parent_v) u.p.setAllAncestorAtFileNodesDirty() c.selectPosition(u.p) #@+node:ekr.20050412084055: *4* u.undoDeleteNode def undoDeleteNode(self): u = self; c = u.c if u.oldBack: u.p._linkAfter(u.oldBack) elif u.oldParent: u.p._linkAsNthChild(u.oldParent, 0) else: oldRoot = c.rootPosition() u.p._linkAsRoot(oldRoot) 
u.p.setAllAncestorAtFileNodesDirty() c.selectPosition(u.p) #@+node:ekr.20080425060424.10: *4* u.undoDemote def undoDemote(self): u = self; c = u.c parent_v = u.p._parentVnode() n = len(u.followingSibs) # Remove the demoted nodes from p's children. u.p.v.children = u.p.v.children[: -n] # Add the demoted nodes to the parent's children. parent_v.children.extend(u.followingSibs) # Adjust the parent links. # There is no need to adjust descendant links. for sib in u.followingSibs: sib.parents.remove(u.p.v) sib.parents.append(parent_v) c.setCurrentPosition(u.p) #@+node:ekr.20050318085713: *4* u.undoGroup def undoGroup(self): '''Process beads until the matching 'beforeGroup' bead is seen.''' u = self # Remember these values. c = u.c dirtyVnodeList = u.dirtyVnodeList or [] oldSel = u.oldSel p = u.p.copy() u.groupCount += 1 bunch = u.beads[u.bead]; count = 0 if not hasattr(bunch, 'items'): g.trace('oops: expecting bunch.items. bunch.kind = %s' % bunch.kind) g.trace(bunch) else: # Important bug fix: 9/8/06: reverse the items first. reversedItems = bunch.items[:] reversedItems.reverse() for z in reversedItems: self.setIvarsFromBunch(z) if z.undoHelper: z.undoHelper(); count += 1 else: g.trace('oops: no undo helper for %s %s' % (u.undoType, p.v)) u.groupCount -= 1 u.updateMarks('old') # Bug fix: Leo 4.4.6. for v in dirtyVnodeList: v.setDirty() # Bug fix: Leo 4.4.6. if not g.unitTesting and u.verboseUndoGroup: g.es("undo", count, "instances") c.selectPosition(p) if oldSel: i, j = oldSel c.frame.body.wrapper.setSelectionRange(i, j) #@+node:ekr.20050412083244: *4* u.undoHoistNode & undoDehoistNode def undoHoistNode(self): u = self; c = u.c c.selectPosition(u.p) c.dehoist() def undoDehoistNode(self): u = self; c = u.c c.selectPosition(u.p) c.hoist() #@+node:ekr.20050412085112: *4* u.undoInsertNode def undoInsertNode(self): u = self; c = u.c; cc = c.chapterController if cc: cc.selectChapterByName('main') c.selectPosition(u.newP) c.deleteOutline() # Bug fix: 2016/03/30. # This always selects the proper new position. # c.selectPosition(u.p) if u.pasteAsClone: for bunch in u.beforeTree: v = bunch.v if u.p.v == v: c.setBodyString(u.p, bunch.body) c.setHeadString(u.p, bunch.head) else: v.setBodyString(bunch.body) v.setHeadString(bunch.head) #@+node:ekr.20050526124906: *4* u.undoMark def undoMark(self): u = self; c = u.c u.updateMarks('old') if u.groupCount == 0: for v in u.dirtyVnodeList: v.setDirty() # Bug fix: Leo 4.4.6. c.selectPosition(u.p) #@+node:ekr.20050411112033: *4* u.undoMove def undoMove(self): u = self; c = u.c; cc = c.chapterController if cc: cc.selectChapterByName('main') v = u.p.v assert(u.oldParent_v) assert(u.newParent_v) assert(v) # Adjust the children arrays. assert u.newParent_v.children[u.newN] == v del u.newParent_v.children[u.newN] u.oldParent_v.children.insert(u.oldN, v) # Recompute the parent links. v.parents.append(u.oldParent_v) v.parents.remove(u.newParent_v) u.updateMarks('old') for v in u.dirtyVnodeList: v.setDirty() c.selectPosition(u.p) #@+node:ekr.20050318085713.1: *4* u.undoNodeContents def undoNodeContents(self): '''Undo all changes to the contents of a node, including headline and body text, and marked bits. ''' u = self; c = u.c w = c.frame.body.wrapper u.p.b = u.oldBody w.setAllText(u.oldBody) c.frame.body.recolor(u.p) u.p.h = u.oldHead # This is required. Otherwise c.redraw will revert the change! 
c.frame.tree.setHeadline(u.p, u.oldHead) if u.groupCount == 0 and u.oldSel: i, j = u.oldSel w.setSelectionRange(i, j) if u.groupCount == 0 and u.oldYScroll is not None: w.setYScrollPosition(u.oldYScroll) u.updateMarks('old') for v in u.dirtyVnodeList: v.setDirty() # Bug fix: Leo 4.4.6. #@+node:ekr.20080425060424.14: *4* u.undoPromote def undoPromote(self): u = self; c = u.c parent_v = u.p._parentVnode() # The parent of the all the *promoted* nodes. # Remove the promoted nodes from parent_v's children. n = u.p.childIndex() + 1 # Adjust the old parents children old_children = parent_v.children parent_v.children = old_children[: n] # Add the nodes before the promoted nodes. parent_v.children.extend(old_children[n + len(u.children):]) # Add the nodes after the promoted nodes. # Add the demoted nodes to v's children. u.p.v.children = u.children[:] # Adjust the parent links. # There is no need to adjust descendant links. for child in u.children: child.parents.remove(parent_v) child.parents.append(u.p.v) c.setCurrentPosition(u.p) #@+node:ekr.20031218072017.1493: *4* u.undoRedoText def undoRedoText(self, p, leading, trailing, # Number of matching leading & trailing lines. oldMidLines, newMidLines, # Lists of unmatched lines. oldNewlines, newNewlines, # Number of trailing newlines. tag="undo", # "undo" or "redo" undoType=None ): '''Handle text undo and redo: converts _new_ text into _old_ text.''' # newNewlines is unused, but it has symmetry. u = self; c = u.c; w = c.frame.body.wrapper #@+<< Compute the result using p's body text >> #@+node:ekr.20061106105812.1: *5* << Compute the result using p's body text >> # Recreate the text using the present body text. body = p.b body = g.toUnicode(body) body_lines = body.split('\n') s = [] if leading > 0: s.extend(body_lines[: leading]) if oldMidLines: s.extend(oldMidLines) if trailing > 0: s.extend(body_lines[-trailing:]) s = '\n'.join(s) # Remove trailing newlines in s. while s and s[-1] == '\n': s = s[: -1] # Add oldNewlines newlines. if oldNewlines > 0: s = s + '\n' * oldNewlines result = s if u.debug_print: g.pr("body: ", body) g.pr("result:", result) #@-<< Compute the result using p's body text >> p.setBodyString(result) w.setAllText(result) sel = u.oldSel if tag == 'undo' else u.newSel if sel: i, j = sel w.setSelectionRange(i, j, insert=j) c.frame.body.recolor(p) w.seeInsertPoint() # 2009/12/21 #@+node:ekr.20050408100042: *4* u.undoRedoTree def undoRedoTree(self, p, new_data, old_data): '''Replace p and its subtree using old_data during undo.''' # Same as undoReplace except uses g.Bunch. u = self; c = u.c if new_data is None: # This is the first time we have undone the operation. # Put the new data in the bead. bunch = u.beads[u.bead] bunch.newTree = u.saveTree(p.copy()) u.beads[u.bead] = bunch # Replace data in tree with old data. u.restoreTree(old_data) c.setBodyString(p, p.b) return p # Nothing really changes. #@+node:ekr.20080425060424.5: *4* u.undoSort def undoSort(self): u = self; c = u.c parent_v = u.p._parentVnode() parent_v.children = u.oldChildren p = c.setPositionAfterSort(u.sortChildren) c.setCurrentPosition(p) #@+node:ekr.20050318085713.2: *4* u.undoTree def undoTree(self): '''Redo replacement of an entire tree.''' u = self; c = u.c u.p = self.undoRedoTree(u.p, u.newTree, u.oldTree) c.selectPosition(u.p) # Does full recolor. 
if u.oldSel: i, j = u.oldSel c.frame.body.wrapper.setSelectionRange(i, j) #@+node:EKR.20040526090701.4: *4* u.undoTyping def undoTyping(self): u = self; c = u.c; current = c.p w = c.frame.body.wrapper # selectPosition causes recoloring, so don't do this unless needed. if current != u.p: c.selectPosition(u.p) self.undoRedoText( u.p, u.leading, u.trailing, u.oldMiddleLines, u.newMiddleLines, u.oldNewlines, u.newNewlines, tag="undo", undoType=u.undoType) u.updateMarks('old') for v in u.dirtyVnodeList: v.setDirty() # Bug fix: Leo 4.4.6. if u.oldSel: c.bodyWantsFocus() i, j = u.oldSel w.setSelectionRange(i, j, insert=j) if u.yview: c.bodyWantsFocus() w.setYScrollPosition(u.yview) #@-others #@-others #@@language python #@@tabwidth -4 #@@pagewidth 70 #@-leo
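# ----------------------------------------------------------------------------
# A minimal, self-contained sketch of the "bead" model described in the
# commentary at the top of leoUndo.py: beads live in a list, a bead pointer
# moves backwards on undo and forwards on redo, and pushing a new bead clears
# all forward (redo) entries. This is only an illustration; the class and
# function names below are hypothetical and much simpler than Leo's Undoer,
# whose real beads are g.Bunch objects carrying undoHelper/redoHelper callables.

class BeadStackSketch:
    """A toy bead stack: each bead holds an (undo_fn, redo_fn) pair."""

    def __init__(self):
        self.beads = []   # List of beads (undo/redo callable pairs).
        self.bead = -1    # Index of the present bead; -1 means nothing to undo.

    def push(self, undo_fn, redo_fn):
        # Same pattern as u.pushBead: advance the pointer, then clear all
        # forward entries so the bead list never branches.
        self.bead += 1
        self.beads[self.bead:] = [(undo_fn, redo_fn)]

    def undo(self):
        if self.bead >= 0:
            undo_fn, _ = self.beads[self.bead]
            undo_fn()
            self.bead -= 1   # Move the bead pointer backwards.

    def redo(self):
        if self.bead + 1 < len(self.beads):
            _, redo_fn = self.beads[self.bead + 1]
            redo_fn()
            self.bead += 1   # Move the bead pointer forwards.


if __name__ == '__main__':
    # Tiny demonstration on a one-slot "document".
    doc = ['a']
    stack = BeadStackSketch()

    def make_edit(new_value):
        old_value = doc[0]
        def undo_fn(): doc[0] = old_value
        def redo_fn(): doc[0] = new_value
        doc[0] = new_value
        stack.push(undo_fn, redo_fn)

    make_edit('ab')
    make_edit('abc')
    stack.undo()        # doc == ['ab']
    stack.undo()        # doc == ['a']
    stack.redo()        # doc == ['ab']
    make_edit('aX')     # A new edit discards the remaining redo entry.
    assert doc == ['aX'] and stack.bead == len(stack.beads) - 1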
the-stack_0_8241
#! coding:utf-8 """ The bottle module defines the Bottle class that is one element in a water sort puzzle. """ # Import to do typing :Bottle inside class Bottle from __future__ import annotations from typing import Sequence, Optional, Set, Any class BottleError(Exception): """Exception from the Bottle class.""" class Bottle: """ A bottle contains doses of colored water (up to Bottle.MAX_DOSES) The content of a bottle is a list of objects where each objet identifies a color. doses = [None, None, None, None] in case of empty bottle (nb_doses = 0) doses = ['X', None, None, None] where the bottle contains only one dose of 'X' (nb_doses = 1) doses = ['X', 'Y', 'Y', None] where the bottle contains one dose of 'X' at the bottom and 2 doses of 'Y' at the top (nb_doses = 3) In this situation, the bottle contains 3 doses with 2 different colors """ # Speedup properties for this class __slots__ = "doses", "nb_doses" MAX_DOSES = 4 def __init__(self, doses: Sequence): self.doses: list[Any] = [ None, ] * Bottle.MAX_DOSES self.nb_doses = 0 for dose in doses: if dose is not None: self.doses[self.nb_doses] = dose self.nb_doses += 1 @property def is_empty(self) -> bool: """@return True if the bottle is empty.""" return self.nb_doses == 0 @property def is_full(self) -> bool: """@return True if the bottle is full.""" return self.nb_doses == Bottle.MAX_DOSES @property def colors(self) -> Set[Any]: """@return Set of the different colors in the bottle.""" return set(self.doses[: self.nb_doses]) @property def nb_different_colors(self) -> int: """Number of different colors in the bottle.""" return len(self.colors) @property def top_color(self) -> Optional[Any]: """Top color in the bottle.""" if self.nb_doses == 0: return None return self.doses[self.nb_doses - 1] def iter_doses(self): """Iterator on every dose holding a color in the bottle.""" for i in range(self.nb_doses): yield self.doses[i] def is_same_as(self, other: Bottle) -> bool: """ @return True if bottles are the same. (same as __eq__ but not checking isinstance of the other bottle to speedup computation) """ if self.nb_doses != other.nb_doses: return False for i in range(self.nb_doses): if self.doses[i] != other.doses[i]: return False return True def pop_dose(self) -> Any: """Pop the top dose in the bottle and return its color.""" if self.is_empty: raise BottleError("Cannot pop dose from an empty bottle") ret = self.doses[self.nb_doses - 1] self.doses[self.nb_doses - 1] = None self.nb_doses -= 1 return ret def can_push_dose(self, color: Any) -> bool: """@return True if one dose of the color can be poured into the bottle.""" if self.nb_doses == 0: return True if self.nb_doses == Bottle.MAX_DOSES: return False return self.doses[self.nb_doses - 1] == color def push_dose(self, color: Any) -> None: """Pour one dose of the color into the bottle.""" if not self.can_push_dose(color): raise BottleError(f"Cannot pour {color} into {self}") self.doses[self.nb_doses] = color self.nb_doses += 1 def is_possible_to_pour_one_dose_into(self, destination: Bottle) -> bool: """ @return True if at least one dose of the top color can be poured into the destination bottle. """ if self.nb_doses == 0: return False if destination.nb_doses == 0: return True if destination.nb_doses == Bottle.MAX_DOSES: return False # Same top colors ? return ( self.doses[self.nb_doses - 1] == destination.doses[destination.nb_doses - 1] ) def is_interesting_to_pour_into(self, destination: Bottle) -> bool: """ @return True if pouring into destination leads to an interesting situation. 
        (Same as is_possible_to_pour_one_dose_into, but this also checks
        that the resulting situation is interesting.)
        """
        if destination.nb_doses == Bottle.MAX_DOSES:
            return False  # destination is full
        if self.nb_doses == 0:
            return False  # Source empty
        if destination.nb_doses == 0:
            if self.nb_different_colors == 1:
                return False  # Because the resulting situation would be the same
            return True
        # Same top colors ?
        return (
            self.doses[self.nb_doses - 1]
            == destination.doses[destination.nb_doses - 1]
        )

    def pour_into(self, destination: Bottle) -> int:
        """Pour all possible doses of top color into the destination bottle.

        @return number of poured doses
        """
        nb_doses = 0
        while self.is_possible_to_pour_one_dose_into(destination):
            color = self.pop_dose()
            destination.push_dose(color)
            nb_doses += 1
        return nb_doses

    def clone(self) -> Bottle:
        """@return A copy (clone) of the bottle."""
        copy_list_doses = self.doses.copy()
        return Bottle(copy_list_doses)

    def __repr__(self):
        return f"<{self.doses[:self.nb_doses]}>"
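# ----------------------------------------------------------------------------
# Short usage sketch for the Bottle API documented in the class docstring
# above. The color values ('X', 'Y') and the scenario are made up for
# illustration; only methods defined in this module are used.

if __name__ == '__main__':
    source = Bottle(['X', 'Y', 'Y', None])    # 'X' at the bottom, two 'Y' on top
    target = Bottle(['Y', None, None, None])  # a single dose of 'Y'

    assert source.nb_doses == 3 and source.top_color == 'Y'
    assert target.can_push_dose('Y')
    assert source.is_interesting_to_pour_into(target)

    poured = source.pour_into(target)   # Moves both 'Y' doses onto the 'Y'.
    assert poured == 2
    assert source.doses[:source.nb_doses] == ['X']
    assert target.nb_doses == 3 and target.colors == {'Y'}

    # 'X' cannot be poured onto 'Y', so nothing more can move.
    assert not source.is_possible_to_pour_one_dose_into(target)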
the-stack_0_8242
import base64 import json import logging from html.parser import HTMLParser from http.client import HTTPConnection from markupsafe import escape from sqlalchemy import ( and_, desc, ) from sqlalchemy.orm import ( joinedload, lazyload, undefer, ) from sqlalchemy.sql import expression from galaxy import ( model, util, web, ) from galaxy.managers.sharable import SlugBuilder from galaxy.managers.workflows import ( MissingToolsException, WorkflowUpdateOptions, ) from galaxy.model.item_attrs import UsesItemRatings from galaxy.tools.parameters.basic import workflow_building_modes from galaxy.util import ( FILENAME_VALID_CHARS, unicodify, ) from galaxy.util.sanitize_html import sanitize_html from galaxy.web import ( error, url_for, ) from galaxy.web.framework.helpers import ( grids, time_ago, ) from galaxy.webapps.base.controller import ( BaseUIController, SharableMixin, UsesStoredWorkflowMixin, ) from galaxy.workflow.extract import ( extract_workflow, summarize, ) from galaxy.workflow.modules import ( load_module_sections, module_factory, ) from galaxy.workflow.render import ( STANDALONE_SVG_TEMPLATE, WorkflowCanvas, ) log = logging.getLogger(__name__) class StoredWorkflowListGrid(grids.Grid): class StepsColumn(grids.GridColumn): def get_value(self, trans, grid, workflow): return len(workflow.latest_workflow.steps) # Grid definition use_panels = True title = "Saved Workflows" model_class = model.StoredWorkflow default_filter = {"name": "All", "tags": "All"} default_sort_key = "-update_time" columns = [ grids.TextColumn("Name", key="name", attach_popup=True, filterable="advanced"), grids.IndividualTagsColumn( "Tags", "tags", model_tag_association_class=model.StoredWorkflowTagAssociation, filterable="advanced", grid_name="StoredWorkflowListGrid", ), StepsColumn("Steps"), grids.GridColumn("Created", key="create_time", format=time_ago), grids.GridColumn("Last Updated", key="update_time", format=time_ago), ] columns.append( grids.MulticolFilterColumn( "Search", cols_to_filter=[columns[0], columns[1]], key="free-text-search", visible=False, filterable="standard", ) ) operations = [ grids.GridOperation( "Edit", allow_multiple=False, condition=(lambda item: not item.deleted), async_compatible=False ), grids.GridOperation("Run", condition=(lambda item: not item.deleted), async_compatible=False), grids.GridOperation("Copy", condition=(lambda item: not item.deleted), async_compatible=False), grids.GridOperation("Rename", condition=(lambda item: not item.deleted), async_compatible=False), grids.GridOperation("Sharing", condition=(lambda item: not item.deleted), async_compatible=False), grids.GridOperation("Delete", condition=(lambda item: item.deleted), async_compatible=True), ] def apply_query_filter(self, trans, query, **kwargs): return query.filter_by(user=trans.user, deleted=False) class StoredWorkflowAllPublishedGrid(grids.Grid): title = "Published Workflows" model_class = model.StoredWorkflow default_sort_key = "update_time" default_filter = dict(public_url="All", username="All", tags="All") columns = [ grids.PublicURLColumn("Name", key="name", filterable="advanced", attach_popup=True), grids.OwnerAnnotationColumn( "Annotation", key="annotation", model_annotation_association_class=model.StoredWorkflowAnnotationAssociation, filterable="advanced", ), grids.OwnerColumn("Owner", key="username", model_class=model.User, filterable="advanced"), grids.CommunityRatingColumn("Community Rating", key="rating"), grids.CommunityTagsColumn( "Community Tags", key="tags", 
model_tag_association_class=model.StoredWorkflowTagAssociation, filterable="advanced", grid_name="PublicWorkflowListGrid", ), grids.ReverseSortColumn("Last Updated", key="update_time", format=time_ago), ] columns.append( grids.MulticolFilterColumn( "Search name, annotation, owner, and tags", cols_to_filter=[columns[0], columns[1], columns[2], columns[4]], key="free-text-search", visible=False, filterable="standard", ) ) operations = [ grids.GridOperation( "Run", condition=(lambda item: not item.deleted), allow_multiple=False, url_args=dict(controller="workflows", action="run"), ), grids.GridOperation( "Import", condition=(lambda item: not item.deleted), allow_multiple=False, url_args=dict(action="imp") ), grids.GridOperation( "Save as File", condition=(lambda item: not item.deleted), allow_multiple=False, url_args=dict(action="export_to_file"), ), ] num_rows_per_page = 50 use_paging = True def build_initial_query(self, trans, **kwargs): # See optimization description comments and TODO for tags in matching public histories query. # In addition to that - be sure to lazyload the latest_workflow - it isn't needed and it causes all # of its steps to be eagerly loaded. return ( trans.sa_session.query(self.model_class) .join("user") .options( lazyload("latest_workflow"), joinedload("user").load_only("username"), joinedload("annotations"), undefer("average_rating"), ) ) def apply_query_filter(self, trans, query, **kwargs): # A public workflow is published, has a slug, and is not deleted. return ( query.filter(self.model_class.published == expression.true()) .filter(self.model_class.slug.isnot(None)) .filter(self.model_class.deleted == expression.false()) ) # Simple HTML parser to get all content in a single tag. class SingleTagContentsParser(HTMLParser): def __init__(self, target_tag): # Cannot use super() because HTMLParser is an old-style class in Python2 HTMLParser.__init__(self) self.target_tag = target_tag self.cur_tag = None self.tag_content = "" def handle_starttag(self, tag, attrs): """Called for each start tag.""" self.cur_tag = tag def handle_data(self, text): """Called for each block of plain text.""" if self.cur_tag == self.target_tag: self.tag_content += text class WorkflowController(BaseUIController, SharableMixin, UsesStoredWorkflowMixin, UsesItemRatings): stored_list_grid = StoredWorkflowListGrid() published_list_grid = StoredWorkflowAllPublishedGrid() slug_builder = SlugBuilder() @web.expose @web.require_login("use Galaxy workflows") def list_grid(self, trans, **kwargs): """List user's stored workflows.""" # status = message = None if "operation" in kwargs: operation = kwargs["operation"].lower() if operation == "rename": return self.rename(trans, **kwargs) history_ids = util.listify(kwargs.get("id", [])) if operation == "sharing": return self.sharing(trans, id=history_ids) return self.stored_list_grid(trans, **kwargs) @web.expose @web.require_login("use Galaxy workflows", use_panels=True) def list(self, trans): """ Render workflow main page (management of existing workflows) """ # Take care of proxy prefix in url as well redirect_url = f"{url_for('/')}workflow" return trans.response.send_redirect(redirect_url) @web.expose @web.json def list_published(self, trans, **kwargs): return self.published_list_grid(trans, **kwargs) @web.expose def display_by_username_and_slug(self, trans, username, slug, format="html"): """ Display workflow based on a username and slug. Format can be html, json, or json-download. """ # Get workflow by username and slug. 
Security is handled by the display methods below. session = trans.sa_session user = session.query(model.User).filter_by(username=username).first() if not user: raise web.httpexceptions.HTTPNotFound() stored_workflow = ( trans.sa_session.query(model.StoredWorkflow).filter_by(user=user, slug=slug, deleted=False).first() ) if not stored_workflow: raise web.httpexceptions.HTTPNotFound() encoded_id = trans.security.encode_id(stored_workflow.id) # Display workflow in requested format. if format == "html": return self._display(trans, stored_workflow) elif format == "json": return self.for_direct_import(trans, encoded_id) elif format == "json-download": return self.export_to_file(trans, encoded_id) @web.expose def display_by_id(self, trans, id): """Display workflow based on id.""" # Get workflow. stored_workflow = self.get_stored_workflow(trans, id) return self._display(trans, stored_workflow) def _display(self, trans, stored_workflow): """Diplay workflow as HTML page.""" if stored_workflow is None: raise web.httpexceptions.HTTPNotFound() # Security check raises error if user cannot access workflow. self.security_check(trans, stored_workflow, False, True) # Get data for workflow's steps. self.get_stored_workflow_steps(trans, stored_workflow) # Get annotations. stored_workflow.annotation = self.get_item_annotation_str( trans.sa_session, stored_workflow.user, stored_workflow ) for step in stored_workflow.latest_workflow.steps: step.annotation = self.get_item_annotation_str(trans.sa_session, stored_workflow.user, step) user_is_owner = True if trans.user == stored_workflow.user else False # Get rating data. user_item_rating = 0 if trans.get_user(): user_item_rating = self.get_user_item_rating(trans.sa_session, trans.get_user(), stored_workflow) if user_item_rating: user_item_rating = user_item_rating.rating else: user_item_rating = 0 ave_item_rating, num_ratings = self.get_ave_item_rating_data(trans.sa_session, stored_workflow) return trans.fill_template_mako( "workflow/display.mako", item=stored_workflow, item_data=stored_workflow.latest_workflow.steps, user_item_rating=user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings, user_is_owner=user_is_owner, ) @web.expose def get_item_content_async(self, trans, id): """Returns item content in HTML format.""" stored = self.get_stored_workflow(trans, id, False, True) if stored is None: raise web.httpexceptions.HTTPNotFound() # Get data for workflow's steps. self.get_stored_workflow_steps(trans, stored) # Get annotations. 
stored.annotation = self.get_item_annotation_str(trans.sa_session, stored.user, stored) for step in stored.latest_workflow.steps: step.annotation = self.get_item_annotation_str(trans.sa_session, stored.user, step) return trans.fill_template_mako( "/workflow/item_content.mako", item=stored, item_data=stored.latest_workflow.steps ) @web.expose @web.require_login("use Galaxy workflows") def share(self, trans, id, email="", use_panels=False): msg = mtype = None # Load workflow from database stored = self.get_stored_workflow(trans, id) if email: other = ( trans.sa_session.query(model.User) .filter(and_(model.User.table.c.email == email, model.User.table.c.deleted == expression.false())) .first() ) if not other: mtype = "error" msg = f"User '{escape(email)}' does not exist" elif other == trans.get_user(): mtype = "error" msg = "You cannot share a workflow with yourself" elif ( trans.sa_session.query(model.StoredWorkflowUserShareAssociation) .filter_by(user=other, stored_workflow=stored) .count() > 0 ): mtype = "error" msg = f"Workflow already shared with '{escape(email)}'" else: share = model.StoredWorkflowUserShareAssociation() share.stored_workflow = stored share.user = other session = trans.sa_session session.add(share) session.flush() trans.set_message(f"Workflow '{escape(stored.name)}' shared with user '{escape(other.email)}'") return trans.response.send_redirect(url_for(controller="workflow", action="sharing", id=id)) return trans.fill_template( "/ind_share_base.mako", message=msg, messagetype=mtype, item=stored, email=email, use_panels=use_panels ) @web.expose @web.require_login("export Galaxy workflows") def export(self, trans, id, **kwargs): """Handle workflow export.""" session = trans.sa_session # Get session and workflow. stored = self.get_stored_workflow(trans, id) session.add(stored) # Legacy issue: workflows made accessible before recent updates may not have a slug. Create slug for any workflows that need them. if stored.importable and not stored.slug: self._make_item_accessible(trans.sa_session, stored) session.flush() return trans.fill_template("/workflow/sharing.mako", use_panels=True, item=stored) @web.expose @web.require_login("to import a workflow", use_panels=True) def imp(self, trans, id, **kwargs): """Imports a workflow shared by other users.""" # Set referer message. referer = trans.request.referer if referer and not referer.startswith(f"{trans.request.application_url}{url_for('/login')}"): referer_message = f"<a href='{escape(referer)}'>return to the previous page</a>" else: referer_message = f"<a href='{url_for('/')}'>go to Galaxy's start page</a>" # Do import. stored = self.get_stored_workflow(trans, id, check_ownership=False) if stored.importable is False: return trans.show_error_message( f"The owner of this workflow has disabled imports via this link.<br>You can {referer_message}", use_panels=True, ) elif stored.deleted: return trans.show_error_message( f"You can't import this workflow because it has been deleted.<br>You can {referer_message}", use_panels=True, ) self._import_shared_workflow(trans, stored) # Redirect to load galaxy frames. return trans.show_ok_message( message="""Workflow "%s" has been imported. 
<br>You can <a href="%s">start using this workflow</a> or %s.""" % (stored.name, web.url_for("/workflows/list"), referer_message) ) @web.expose @web.require_login("use Galaxy workflows") def rename_async(self, trans, id, new_name=None, **kwargs): stored = self.get_stored_workflow(trans, id) if new_name: san_new_name = sanitize_html(new_name) stored.name = san_new_name stored.latest_workflow.name = san_new_name trans.sa_session.flush() return stored.name @web.expose @web.require_login("use Galaxy workflows") def annotate_async(self, trans, id, new_annotation=None, **kwargs): stored = self.get_stored_workflow(trans, id) if new_annotation: # Sanitize annotation before adding it. new_annotation = sanitize_html(new_annotation) self.add_item_annotation(trans.sa_session, trans.get_user(), stored, new_annotation) trans.sa_session.flush() return new_annotation @web.expose @web.require_login("rate items") @web.json def rate_async(self, trans, id, rating): """Rate a workflow asynchronously and return updated community data.""" stored = self.get_stored_workflow(trans, id, check_ownership=False, check_accessible=True) if not stored: return trans.show_error_message("The specified workflow does not exist.") # Rate workflow. self.rate_item(trans.sa_session, trans.get_user(), stored, rating) return self.get_ave_item_rating_data(trans.sa_session, stored) @web.expose def get_embed_html_async(self, trans, id): """Returns HTML for embedding a workflow in a page.""" # TODO: user should be able to embed any item he has access to. see display_by_username_and_slug for security code. stored = self.get_stored_workflow(trans, id) if stored: return f"Embedded Workflow '{stored.name}'" @web.expose @web.json @web.require_login("use Galaxy workflows") def get_name_and_link_async(self, trans, id=None): """Returns workflow's name and link.""" stored = self.get_stored_workflow(trans, id) return_dict = { "name": stored.name, "link": url_for( controller="workflow", action="display_by_username_and_slug", username=stored.user.username, slug=stored.slug, ), } return return_dict @web.expose @web.require_login("use Galaxy workflows") def gen_image(self, trans, id): stored = self.get_stored_workflow(trans, id, check_ownership=True) try: svg = self._workflow_to_svg_canvas(trans, stored) except Exception: status = "error" message = ( "Galaxy is unable to create the SVG image. Please check your workflow, there might be missing tools." ) return trans.fill_template( "/workflow/sharing.mako", use_panels=True, item=stored, status=status, message=message ) trans.response.set_content_type("image/svg+xml") s = STANDALONE_SVG_TEMPLATE % svg.tostring() return s.encode("utf-8") @web.expose @web.require_login("use Galaxy workflows") def copy(self, trans, id, save_as_name=None): # Get workflow to copy. stored = self.get_stored_workflow(trans, id, check_ownership=False) user = trans.get_user() if stored.user == user: owner = True else: if ( trans.sa_session.query(model.StoredWorkflowUserShareAssociation) .filter_by(user=user, stored_workflow=stored) .count() == 0 ): error("Workflow is not owned by or shared with current user") owner = False # Copy. new_stored = model.StoredWorkflow() if save_as_name: new_stored.name = f"{save_as_name}" else: new_stored.name = f"Copy of {stored.name}" new_stored.latest_workflow = stored.latest_workflow # Copy annotation. 
annotation_obj = self.get_item_annotation_obj(trans.sa_session, stored.user, stored) if annotation_obj: self.add_item_annotation(trans.sa_session, trans.get_user(), new_stored, annotation_obj.annotation) new_stored.copy_tags_from(trans.user, stored) if not owner: new_stored.name += f" shared by {stored.user.email}" new_stored.user = user # Persist session = trans.sa_session session.add(new_stored) session.flush() # Display the management page message = f"Created new workflow with name: {escape(new_stored.name)}" trans.set_message(message) return_url = f"{url_for('/')}workflow?status=done&message={escape(message)}" trans.response.send_redirect(return_url) @web.legacy_expose_api def create(self, trans, payload=None, **kwd): if trans.request.method == "GET": return { "title": "Create Workflow", "inputs": [ {"name": "workflow_name", "label": "Name", "value": "Unnamed workflow"}, { "name": "workflow_annotation", "label": "Annotation", "help": "A description of the workflow; annotation is shown alongside shared or published workflows.", }, ], } else: user = trans.get_user() workflow_name = payload.get("workflow_name") workflow_annotation = payload.get("workflow_annotation") if not workflow_name: return self.message_exception(trans, "Please provide a workflow name.") # Create the new stored workflow stored_workflow = model.StoredWorkflow() stored_workflow.name = workflow_name stored_workflow.user = user self.slug_builder.create_item_slug(trans.sa_session, stored_workflow) # And the first (empty) workflow revision workflow = model.Workflow() workflow.name = workflow_name workflow.stored_workflow = stored_workflow stored_workflow.latest_workflow = workflow # Add annotation. workflow_annotation = sanitize_html(workflow_annotation) self.add_item_annotation(trans.sa_session, trans.get_user(), stored_workflow, workflow_annotation) # Persist session = trans.sa_session session.add(stored_workflow) session.flush() return { "id": trans.security.encode_id(stored_workflow.id), "message": f"Workflow {workflow_name} has been created.", } @web.json def save_workflow_as(self, trans, workflow_name, workflow_data, workflow_annotation="", from_tool_form=False): """ Creates a new workflow based on Save As command. It is a new workflow, but is created with workflow_data already present. """ user = trans.get_user() if workflow_name is not None: workflow_contents_manager = self.app.workflow_contents_manager stored_workflow = model.StoredWorkflow() stored_workflow.name = workflow_name stored_workflow.user = user self.slug_builder.create_item_slug(trans.sa_session, stored_workflow) workflow = model.Workflow() workflow.name = workflow_name workflow.stored_workflow = stored_workflow stored_workflow.latest_workflow = workflow # Add annotation. workflow_annotation = sanitize_html(workflow_annotation) self.add_item_annotation(trans.sa_session, trans.get_user(), stored_workflow, workflow_annotation) # Persist session = trans.sa_session session.add(stored_workflow) session.flush() workflow_update_options = WorkflowUpdateOptions( update_stored_workflow_attributes=False, # taken care of above from_tool_form=from_tool_form, ) try: workflow, errors = workflow_contents_manager.update_workflow_from_raw_description( trans, stored_workflow, workflow_data, workflow_update_options, ) except MissingToolsException as e: return dict( name=e.workflow.name, message=( "This workflow includes missing or invalid tools. " "It cannot be saved until the following steps are removed or the missing tools are enabled." 
), errors=e.errors, ) return trans.security.encode_id(stored_workflow.id) else: # This is an error state, 'save as' must have a workflow_name log.exception("Error in Save As workflow: no name.") @web.expose def delete(self, trans, id=None): """ Mark a workflow as deleted """ # Load workflow from database stored = self.get_stored_workflow(trans, id) # Mark as deleted and save stored.deleted = True trans.user.stored_workflow_menu_entries = [ entry for entry in trans.user.stored_workflow_menu_entries if entry.stored_workflow != stored ] trans.sa_session.add(stored) trans.sa_session.flush() # Display the management page message = f"Workflow deleted: {escape(stored.name)}" trans.set_message(message) return trans.response.send_redirect(f"{url_for('/')}workflow?status=done&message={escape(message)}") @web.expose @web.require_login("edit workflows") def editor(self, trans, id=None, workflow_id=None, version=None): """ Render the main workflow editor interface. The canvas is embedded as an iframe (necessary for scrolling to work properly), which is rendered by `editor_canvas`. """ if not id: if workflow_id: stored_workflow = self.app.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=False) self.security_check(trans, stored_workflow, True, False) stored_workflow_id = trans.security.encode_id(stored_workflow.id) return trans.response.send_redirect(f'{url_for("/")}workflow/editor?id={stored_workflow_id}') error("Invalid workflow id") stored = self.get_stored_workflow(trans, id) # The following query loads all user-owned workflows, # So that they can be copied or inserted in the workflow editor. workflows = ( trans.sa_session.query(model.StoredWorkflow) .filter_by(user=trans.user, deleted=False, hidden=False) .order_by(desc(model.StoredWorkflow.table.c.update_time)) .options(joinedload("latest_workflow").joinedload("steps")) .all() ) if version is None: version = len(stored.workflows) - 1 else: version = int(version) # create workflow module models module_sections = [] for module_section in load_module_sections(trans).values(): module_sections.append( { "title": module_section.get("title"), "name": module_section.get("name"), "elems": [ {"name": elem.get("name"), "title": elem.get("title"), "description": elem.get("description")} for elem in module_section.get("modules") ], } ) # create data manager tool models data_managers = [] if trans.user_is_admin and trans.app.data_managers.data_managers: for data_manager_val in trans.app.data_managers.data_managers.values(): tool = data_manager_val.tool if not tool.hidden: data_managers.append( { "id": tool.id, "name": tool.name, "hidden": tool.hidden, "description": tool.description, "is_workflow_compatible": tool.is_workflow_compatible, } ) # create workflow models workflows = [ { "id": trans.security.encode_id(workflow.id), "latest_id": trans.security.encode_id(workflow.latest_workflow.id), "step_count": len(workflow.latest_workflow.steps), "name": workflow.name, } for workflow in workflows if workflow.id != stored.id ] # identify item tags item_tags = [tag for tag in stored.tags if tag.user == trans.user] item_tag_names = [] for ta in item_tags: item_tag_names.append(escape(ta.tag.name)) # build workflow editor model editor_config = { "id": trans.security.encode_id(stored.id), "name": stored.name, "tags": item_tag_names, "initialVersion": version, "annotation": self.get_item_annotation_str(trans.sa_session, trans.user, stored), "moduleSections": module_sections, "dataManagers": data_managers, "workflows": workflows, } # parse to mako 
return trans.fill_template("workflow/editor.mako", editor_config=editor_config) @web.json def load_workflow(self, trans, id, version=None): """ Get the latest Workflow for the StoredWorkflow identified by `id` and encode it as a json string that can be read by the workflow editor web interface. """ trans.workflow_building_mode = workflow_building_modes.ENABLED stored = self.get_stored_workflow(trans, id, check_ownership=True, check_accessible=False) workflow_contents_manager = self.app.workflow_contents_manager return workflow_contents_manager.workflow_to_dict(trans, stored, style="editor", version=version) @web.expose @web.require_login("use workflows") def export_to_myexp(self, trans, id, myexp_username, myexp_password): """ Exports a workflow to myExperiment website. """ trans.workflow_building_mode = workflow_building_modes.ENABLED stored = self.get_stored_workflow(trans, id, check_ownership=False, check_accessible=True) # Convert workflow to dict. workflow_dict = self._workflow_to_dict(trans, stored) # # Create and submit workflow myExperiment request. # # Create workflow content JSON. workflow_content = json.dumps(workflow_dict, indent=4, sort_keys=True) # Create myExperiment request. request_raw = trans.fill_template( "workflow/myexp_export.mako", workflow_name=workflow_dict["name"], workflow_description=workflow_dict["annotation"], workflow_content=workflow_content, workflow_svg=self._workflow_to_svg_canvas(trans, stored).tostring(), ) # strip() b/c myExperiment XML parser doesn't allow white space before XML; utf-8 handles unicode characters. request = unicodify(request_raw.strip(), "utf-8") # Do request and get result. auth_header = base64.b64encode(f"{myexp_username}:{myexp_password}") headers = {"Content-type": "text/xml", "Accept": "text/xml", "Authorization": f"Basic {auth_header}"} myexp_url = trans.app.config.myexperiment_target_url conn = HTTPConnection(myexp_url) # NOTE: blocks web thread. conn.request("POST", "/workflow.xml", request, headers) response = conn.getresponse() response_data = response.read() conn.close() # Do simple parse of response to see if export successful and provide user feedback. parser = SingleTagContentsParser("id") parser.feed(response_data) myexp_workflow_id = parser.tag_content workflow_list_str = f" <br>Return to <a href='{url_for(controller='workflows', action='list')}'>workflow list." if myexp_workflow_id: return trans.show_message( """Workflow '{}' successfully exported to myExperiment. <br/> <a href="http://{}/workflows/{}">Click here to view the workflow on myExperiment</a> {} """.format( stored.name, myexp_url, myexp_workflow_id, workflow_list_str ), use_panels=True, ) else: return trans.show_error_message( "Workflow '%s' could not be exported to myExperiment. Error: %s %s" % (stored.name, response_data, workflow_list_str), use_panels=True, ) @web.json_pretty def for_direct_import(self, trans, id): """ Get the latest Workflow for the StoredWorkflow identified by `id` and encode it as a json string that can be imported back into Galaxy This has slightly different information than the above. In particular, it does not attempt to decode forms and build UIs, it just stores the raw state. 
""" stored = self.get_stored_workflow(trans, id, check_ownership=False, check_accessible=True) return self._workflow_to_dict(trans, stored) @web.json_pretty def export_to_file(self, trans, id): """ Get the latest Workflow for the StoredWorkflow identified by `id` and encode it as a json string that can be imported back into Galaxy This has slightly different information than the above. In particular, it does not attempt to decode forms and build UIs, it just stores the raw state. """ # Get workflow. stored = self.get_stored_workflow(trans, id, check_ownership=False, check_accessible=True) # Stream workflow to file. stored_dict = self._workflow_to_dict(trans, stored) if not stored_dict: # This workflow has a tool that's missing from the distribution trans.response.status = 400 return "Workflow cannot be exported due to missing tools." sname = stored.name sname = "".join(c in FILENAME_VALID_CHARS and c or "_" for c in sname)[0:150] trans.response.headers["Content-Disposition"] = f'attachment; filename="Galaxy-Workflow-{sname}.ga"' trans.response.set_content_type("application/galaxy-archive") return stored_dict @web.expose def build_from_current_history( self, trans, job_ids=None, dataset_ids=None, dataset_collection_ids=None, workflow_name=None, dataset_names=None, dataset_collection_names=None, ): user = trans.get_user() history = trans.get_history() if not user: return trans.show_error_message("Must be logged in to create workflows") if (job_ids is None and dataset_ids is None) or workflow_name is None: jobs, warnings = summarize(trans) # Render return trans.fill_template( "workflow/build_from_current_history.mako", jobs=jobs, warnings=warnings, history=history ) else: # If there is just one dataset name selected or one dataset collection, these # come through as string types instead of lists. xref #3247. dataset_names = util.listify(dataset_names) dataset_collection_names = util.listify(dataset_collection_names) stored_workflow = extract_workflow( trans, user=user, job_ids=job_ids, dataset_ids=dataset_ids, dataset_collection_ids=dataset_collection_ids, workflow_name=workflow_name, dataset_names=dataset_names, dataset_collection_names=dataset_collection_names, ) # Index page with message workflow_id = trans.security.encode_id(stored_workflow.id) return trans.show_message( 'Workflow "%s" created from current history. ' 'You can <a href="%s" target="_parent">edit</a> or <a href="%s" target="_parent">run</a> the workflow.' % ( escape(workflow_name), url_for(controller="workflow", action="editor", id=workflow_id), url_for(controller="workflows", action="run", id=workflow_id), ) ) def get_item(self, trans, id): return self.get_stored_workflow(trans, id) def _workflow_to_svg_canvas(self, trans, stored): workflow = stored.latest_workflow workflow_canvas = WorkflowCanvas() for step in workflow.steps: # Load from database representation module = module_factory.from_workflow_step(trans, step) module_name = module.get_name() module_data_inputs = module.get_data_inputs() module_data_outputs = module.get_data_outputs() workflow_canvas.populate_data_for_step( step, module_name, module_data_inputs, module_data_outputs, ) workflow_canvas.add_steps() return workflow_canvas.finish()
the-stack_0_8243
from batou.component import Component
from batou.lib.appenv import AppEnv
from batou.lib.file import SyncDirectory, File
from batou.lib.supervisor import Program
from batou.utils import Address


class Django(Component):
    def configure(self):
        # Listen address for the Django development server on this host.
        self.address = Address(self.host.fqdn, "8081")
        # Python 3.8 application environment plus the synced project sources.
        self += AppEnv("3.8")
        self += SyncDirectory("mysite", source="mysite")
        self += File("foo", content="asdf\nbsdf\ncsdf")
        # Supervisor-managed program entry that runs the dev server.
        self += Program(
            "django",
            command="bin/python",
            deployment="cold",
            options={"stopasgroup": "true"},
            args=self.expand("mysite/manage.py runserver "
                             " {{component.address.listen}}"),
        )
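
# Hedged illustration (plain Python, not batou's API): it is assumed here that
# Address(host, "8081").listen renders as "host:8081" and that self.expand()
# fills the Jinja-style {{component.address.listen}} placeholder with it, so the
# supervisor program ends up running roughly
# "bin/python mysite/manage.py runserver host:8081".
def _expected_runserver_args(listen="app.example.com:8081"):
    # The host name is hypothetical; batou would supply the real FQDN.
    template = "mysite/manage.py runserver {{component.address.listen}}"
    return template.replace("{{component.address.listen}}", listen)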
the-stack_0_8245
from setuptools import setup import os.path current_dir = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(current_dir, 'README.md')) as rdr: long_description = rdr.read() setup(name='pymonkey', version='0.1.0', description='Monkey interpreter', long_description=long_description, url='http://github.com/adamvinueza/pymonkey', author='Adam Vinueza', author_email='[email protected]', license='Apache 2.0', packages=['pymonkey'], classifiers=[ 'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7' ], zip_safe=False)
the-stack_0_8247
import json import jsonpickle from decimal import Decimal from flask import Blueprint from farmsList.public.models import Parcel, Farmland, AdditionalLayer from farmsList.database import db from sqlalchemy import func blueprint = Blueprint('api', __name__, url_prefix='/api', static_folder="../static") def pre_json_encode(obj): for key in obj.__dict__.keys(): if isinstance(obj.__dict__[key], Decimal): obj.__dict__[key] = float(obj.__dict__[key]) obj.__dict__['_sa_instance_state'] = None return obj @blueprint.route("/parcel/", methods=["GET", "POST"]) def api_parcel(): farmlandData = Farmland.query.filter(Farmland.public == True).all() for farmland in farmlandData: farmland.geometry = db.session.query(func.ST_AsGeoJson(farmland.geometry)).all()[0][0] db.session.close() farmland.center = db.session.query(func.ST_AsGeoJson(farmland.center)).all()[0][0] db.session.close() farmland.center = json.loads(str(farmland.center)) farmland = pre_json_encode(farmland) return jsonpickle.encode(farmlandData, unpicklable=False, make_refs=False) @blueprint.route("/farmland/<int:farmlandId>", methods=["GET", "POST"]) def api_farmland_by_id(farmlandId): farmlandData = Farmland.query.filter_by(id=farmlandId).all()[0] farmlandData.center = db.session.query(func.ST_AsGeoJson(farmlandData.center)).all()[0][0] db.session.close() farmlandData.geometry = db.session.query(func.ST_AsGeoJson(farmlandData.geometry)).all()[0][0] db.session.close() farmlandData.center = json.loads(str(farmlandData.center)) farmlandData = pre_json_encode(farmlandData) return jsonpickle.encode(farmlandData, unpicklable=False, make_refs=False) @blueprint.route("/tax-incentive-zones", methods=["GET"]) def tax_incentive_zones(): taxIncentiveZones = AdditionalLayer.query.filter_by(name="taxIncentive").all() for taxIncentiveZone in taxIncentiveZones: taxIncentiveZone.geometry = db.session.query(func.ST_AsGeoJson(taxIncentiveZone.geom)).all()[0][0] db.session.close() return jsonpickle.encode(taxIncentiveZones, unpicklable=False, make_refs=False) @blueprint.route("/food-deserts", methods=["GET"]) def food_desert_zones(): foodDeserts = AdditionalLayer.query.filter_by(name="foodDesert").all() for taxIncentiveZone in foodDeserts: taxIncentiveZone.geometry = db.session.query(func.ST_AsGeoJson(taxIncentiveZone.geom)).all()[0][0] db.session.close() return jsonpickle.encode(foodDeserts, unpicklable=False, make_refs=False)
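
# Hedged sketch: ST_AsGeoJson returns the geometry as a GeoJSON string, which the
# endpoints above parse with json.loads for the "center" but leave as a raw string
# for "geometry". The helper below shows one way to wrap such a record in a
# standard GeoJSON Feature; the field names mirror the Farmland columns used above
# and are only meant as an illustration.
def farmland_to_feature(farmland):
    geometry = farmland.geometry
    if isinstance(geometry, str):
        geometry = json.loads(geometry)
    return {
        "type": "Feature",
        "geometry": geometry,
        "properties": {"id": farmland.id, "center": farmland.center},
    }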
the-stack_0_8248
import requests import json import time import logging log = logging.getLogger(__name__) sh = logging.StreamHandler() log.addHandler(sh) log.setLevel(logging.INFO) from nose.tools import with_setup import pymongo from bson.objectid import ObjectId db = pymongo.MongoClient('mongodb://localhost:9001/scitran').get_default_database() adm_user = '[email protected]' base_url = 'http://localhost:8080/api' test_data = type('',(object,),{})() def setup_db(): global session session = requests.Session() session.params = { 'user': adm_user, 'root': True } test_data.group_id = 'test_group_' + str(int(time.time()*1000)) payload = { '_id': test_data.group_id } payload = json.dumps(payload) r = session.post(base_url + '/groups', data=payload) assert r.ok payload = { 'group': test_data.group_id, 'label': 'test_project', 'public': False } payload = json.dumps(payload) r = session.post(base_url + '/projects', data=payload) test_data.pid = json.loads(r.content)['_id'] assert r.ok log.debug('pid = \'{}\''.format(test_data.pid)) payload = { 'project': test_data.pid, 'label': 'session_testing', 'public': False } payload = json.dumps(payload) r = session.post(base_url + '/sessions', data=payload) assert r.ok test_data.sid = json.loads(r.content)['_id'] log.debug('sid = \'{}\''.format(test_data.sid)) payload = { 'session': test_data.sid, 'label': 'acq_testing', 'public': False } payload = json.dumps(payload) r = session.post(base_url + '/acquisitions', data=payload) assert r.ok test_data.aid = json.loads(r.content)['_id'] log.debug('aid = \'{}\''.format(test_data.aid)) def teardown_db(): session.params['root'] = True r = session.delete(base_url + '/acquisitions/' + test_data.aid) assert r.ok r = session.delete(base_url + '/sessions/' + test_data.sid) assert r.ok r = session.delete(base_url + '/projects/' + test_data.pid) assert r.ok @with_setup(setup_db, teardown_db) def test_collections(): payload = { 'curator': adm_user, 'label': 'test_collection_'+ str(int(time.time())) , 'public': False } session.params['root'] = False r = session.post(base_url + '/collections', data=json.dumps(payload)) assert r.ok _id = json.loads(r.content)['_id'] log.debug('_id = \'{}\''.format(_id)) r = session.get(base_url + '/collections/' + _id) assert r.ok payload = { 'contents':{ 'nodes': [{ 'level': 'session', '_id': test_data.sid }], 'operation': 'add' } } r = session.put(base_url + '/collections/' + _id, data=json.dumps(payload)) assert r.ok r = session.get(base_url + '/collections/' + _id + '/acquisitions?session=' + test_data.sid) assert r.ok coll_acq_id= json.loads(r.content)[0]['_id'] assert coll_acq_id == test_data.aid acq_ids = [ObjectId(test_data.aid)] acs = db.acquisitions.find({'_id': {'$in': acq_ids}}) for ac in acs: assert len(ac['collections']) == 1 assert ac['collections'][0] == ObjectId(_id) r = session.delete(base_url + '/collections/' + _id) assert r.ok r = session.get(base_url + '/collections/' + _id) assert r.status_code == 404 acs = db.acquisitions.find({'_id': {'$in': acq_ids}}) for ac in acs: assert len(ac['collections']) == 0
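
# Hedged helper sketch: the payload shape used above for attaching a session to a
# collection, wrapped in a reusable function. It assumes the same running API and
# authenticated `session` object that setup_db creates; nothing here is part of
# the server's own code.
def add_session_to_collection(session, base_url, collection_id, session_id):
    payload = {
        'contents': {
            'nodes': [{'level': 'session', '_id': session_id}],
            'operation': 'add',
        }
    }
    r = session.put(base_url + '/collections/' + collection_id,
                    data=json.dumps(payload))
    return r.ok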
the-stack_0_8249
"""empty message Revision ID: 65edcc47e4ed Revises: c9d6313461dd Create Date: 2020-05-24 17:13:03.346660 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '65edcc47e4ed' down_revision = 'c9d6313461dd' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('users', sa.Column('confirmed', sa.Boolean(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('users', 'confirmed') # ### end Alembic commands ###
the-stack_0_8251
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Project: Tesis Lali Experiment: Camp Visual -> Perimetria Created on Sun Feb 3 11:29:49 2019 @author: Aitor Matilla Hitoria dels canvis versio 2.0.0 canvi mètode: de dins a fora i de fora a dins (2 voltes) enlloc de versio 2.0.1 canvi de nom a Perimetria, afegir time stamp + info versio dins resultat versio 2.0.2 can crear un nom de resultats de perimetria amb nom fix per tal que el movilab pugui crear la mascara. crearem dos resutats ATEMCIO: avancem de versió però pel PC del deslumbrometre ens convé que el nom de l'arxiu sigui sempre el mateix. per tant el nom arxiu a partir d'ara no indica versio 3 voltes de dins a fora. """ from tkinter import * from tkinter import messagebox import time import datetime import math import shutil CVVersion = "Perimetria Movilab v 2.0.2" #Primer llegim de l'arxiu la mida del punt a mostrar a pantalla file = open('..\DotSize.txt', 'r') if file.mode != 'r' : print ('Error reading DoSize.txt') exit() dotSize = file.readline().rstrip() idPatient = file.readline().rstrip() doRightEyeFile = file.readline().rstrip() doLeftEyeFile = file.readline().rstrip() doBothEyesFile = file.readline().rstrip() nIterationsFile = file.readline().rstrip() crossLineFile = file.readline().rstrip() file.close() #Creem/obrim l'arxiu a on escriurem les dades de l'experiment sep=';' endl= '\n' timetag = datetime.datetime.now().strftime("_%Y%m%d_%H%M%S") resultsFilename = "..\Resultats\CampVisual_Resultats"+ timetag +".txt" file = open(resultsFilename, 'w') if file.mode != 'w' : print ('Error reading ' + resultsFilename) exit() #file.write('-------------------------------------\n') #file.write(' New experiment \n') #file.write('-------------------------------------\n') strAux = "Version" + sep + CVVersion + endl file.write(strAux) strAux = 'Initial Date & Time:' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + sep + endl file.write(strAux) strAux = 'Dot size' + sep + dotSize + sep + sep +sep + sep + sep+ endl file.write(strAux) strAux = 'ID patient' + sep + idPatient + endl file.write(strAux) file.close() #Variables de configuració backgroundColor = "black" #"darkgray" focusColor = "white" centralColor = "orange" dotSpeed = 1 #segons #Variables de fluxe execució deg90 = 1 deg45 = 2 deg0 = 3 deg315 = 4 deg270 = 5 deg225 = 6 deg180 = 7 deg135 = 8 experimentDone = 9 currentStep = deg90 #numberOfLoops = int(nIterationsFile) numberOfLoops = 2 eyeLeft=1 eyeRight=2 eyeBoth=3 applyEyeLeft=int(doLeftEyeFile) applyEyeRight=int(doRightEyeFile) applyEyeBoth=int(doBothEyesFile) currentEye=eyeRight currentLoop = 0 # num keys presseed ha de ser zero per tal que el 1r loop vagi de dins a fora numberKeysPressed = 0 maxKeysPressed = 0 GoUp=100 GoDown=101 GoEnter=102 aStr='' bStr='' cStr='' dStr='' eStr='' fStr='' gStr='' hStr='' if (applyEyeRight == 0) and applyEyeLeft == 1: currentEye = eyeLeft elif applyEyeRight == 0 and applyEyeBoth == 1: currentEye = eyeBoth #Obtenim la resolució del monitor per posicionar el centre #OS dependent TODO wMonitorRes = 1920 hMonitorRes = 1080 xPosition = 0 yPosition = 0 def writeToFile (msg): global resultsFilename file = open(resultsFilename, 'a') file.write (msg + '\n') file.close() #Key press action def kp(event): #if event.keysym == 'Return': #verticalAxis(w, int(xLine/2), int(yLine/2), dotSizeNumber, 1) if event.keysym == 'Up': newMovement(w, GoUp) elif event.keysym == 'Down': newMovement(w, GoDown) elif event.keysym == 'Space': # spaceAction = 1 print ("space") elif event.keysym == 'Return': 
newMovement(w, GoEnter) experimentProcedure() elif event.keysym == 'Escape': writeToFile('Experiment closed') master.destroy() #Dibuixem rectangle def cursorRect(canvas, xPos, yPos, size, on) : #print (xPos,yPos) color = focusColor if on == 0: color = backgroundColor elif on == 2: color = centralColor canvas.create_rectangle(xPos, yPos, xPos+size, yPos+size, fill=color, outline=focusColor) #Dibuixem grid def checkered(canvas, line_distance): # vertical lines at an interval of "line_distance" pixel for x in range(line_distance,canvas_width,line_distance): canvas.create_line(x, 0, x, canvas_height, fill=focusColor) # horizontal lines at an interval of "line_distance" pixel for y in range(line_distance,canvas_height,line_distance): canvas.create_line(0, y, canvas_width, y, fill=focusColor) # cross line if necessary if (int(crossLineFile) == 1): init_w = int(canvas_width/dotSizeNumber)*dotSizeNumber print (init_w) canvas.create_line (0,0,xDotCenter*2, yDotCenter*2, fill=focusColor) canvas.create_line (init_w,0,0,yDotCenter*2, fill=focusColor) #New movement to do def newMovement(canvas, direction): global xLine global yLine #global isUpPressed global numberKeysPressed global dotSize global currentStep global xPosition global yPosition previousOffsetX = 0 previousOffsetY = 0 xLine1 = int(xLine/2) yLine1 = int(yLine/2) xLineCurrent = xLine1 yLineCurrent = yLine1 #print("newMovement (1) numberKeysPressed",numberKeysPressed) prevnumberKeysPressed = numberKeysPressed #if numberKeysPressed == 0 and direction == GoDown: if currentLoop==0 and numberKeysPressed == 0 and direction == GoDown: print("can't go close to center") return if currentLoop==1 and numberKeysPressed == maxKeysPressed and direction == GoUp: print("can't go far from center") return if currentStep == deg90: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = 1 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = -1 yLine1 = yLine1+numberKeysPressed yLineCurrent = yLine1 previousOffsetX = 0 elif currentStep == deg45: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = 1 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = -1 previousOffsetX = 2 yLine1 = yLine1+numberKeysPressed yLineCurrent = yLine1 xLine1 = xLine1-numberKeysPressed xLineCurrent = xLine1-1 elif currentStep == deg0: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = 0 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = 0 previousOffsetX = 2 xLine1 = xLine1-numberKeysPressed xLineCurrent = xLine1-1 elif currentStep == deg315: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = -1 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = 1 previousOffsetX = 2 yLine1 = yLine1-numberKeysPressed-1+1 yLineCurrent = yLine1 xLine1 = xLine1-numberKeysPressed xLineCurrent = xLine1-1 elif currentStep == deg270: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = -1 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = 1 yLine1 = yLine1-numberKeysPressed+1 yLineCurrent = yLine1-1 previousOffsetX = 0 elif currentStep == deg225: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = -1 previousOffsetX = 2 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = 1 previousOffsetX = 0 yLine1 = yLine1-numberKeysPressed-1+1 yLineCurrent = 
yLine1 xLine1 = xLine1+numberKeysPressed xLineCurrent = xLine1-1 elif currentStep == deg180: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetX = 2 previousOffsetY = 0 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = 0 previousOffsetX = 0 xLine1 = xLine1+numberKeysPressed xLineCurrent = xLine1-1 elif currentStep == deg135: if direction == GoUp: numberKeysPressed = numberKeysPressed -1 previousOffsetY = 1 previousOffsetX = 2 elif direction == GoDown: numberKeysPressed = numberKeysPressed +1 previousOffsetY = -1 yLine1 = yLine1+numberKeysPressed yLineCurrent = yLine1 xLine1 = xLine1+numberKeysPressed xLineCurrent = xLine1-1 #print("newMovement (2) numberKeysPressed",numberKeysPressed,"xLine1",xLine1,"yLine1",yLine1) xNextValue = (int)(xLine1-xDotPosition) yNextValue = (int)(yLineCurrent-yDotPosition) xNextPosition = xLine1 * dotSizeNumber yNextPosition = yLineCurrent * dotSizeNumber #print ('xNextPosition:',xNextPosition,',',yNextPosition,' yNextPosition') if (xNextValue == 0 and yNextValue ==0) or (xNextPosition < 0) or (xNextPosition > wMonitorRes) or (yNextPosition < 0) or (yNextPosition > hMonitorRes): numberKeysPressed = prevnumberKeysPressed; return xPosition = xLine1 * dotSizeNumber yPosition = yLineCurrent * dotSizeNumber xPrevPosition = (xLineCurrent+previousOffsetX) * dotSizeNumber yPrevPosition = (yLineCurrent+previousOffsetY) * dotSizeNumber xPrevValue = xLineCurrent+previousOffsetX-xDotPosition yPrevValue = yLineCurrent+previousOffsetY-yDotPosition #print ('curr:',xPosition,',',yPosition,' xLine1',xNextValue,' yCurrentLine',yNextValue) focus=1 if direction == GoEnter: focus = 0 cursorRect(canvas, xPosition, yPosition, dotSizeNumber, focus) if xPrevValue != 0 or yPrevValue != 0: cursorRect(canvas, xPrevPosition, yPrevPosition, dotSizeNumber, 0) #print ('pre:',xPrevPosition,',',yPrevPosition) master.update() def clearCoords(): global aStr global bStr global cStr global dStr global eStr global fStr global gStr global hStr aStr='' bStr='' cStr='' dStr='' eStr='' fStr='' gStr='' hStr='' def writeCoords2Disk(): global aStr global bStr global cStr global dStr global eStr global fStr global gStr global hStr writeToFile ('A'+aStr) writeToFile ('B'+bStr) writeToFile ('C'+cStr) writeToFile ('D'+dStr) writeToFile ('E'+eStr) writeToFile ('F'+fStr) writeToFile ('G'+gStr) writeToFile ('H'+hStr) writeToFile ('Abis'+aStr) #Coneix el procediment del experiment def experimentProcedure(): global currentStep global currentLoop global numberKeysPressed global xPosition global yPosition global currentEye global applyEyeRight global applyEyeBoth global aStr global bStr global cStr global dStr global eStr global fStr global gStr global hStr xPosFile = (xPosition / dotSizeNumber) - xDotPosition yPosFile = (yPosition / dotSizeNumber) - yDotPosition #Guardem info a disc if currentStep == deg90: aStr = aStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg45: bStr = bStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg0: cStr = cStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg315: dStr = dStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg270: eStr = eStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg225: fStr = fStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg180: gStr = gStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile elif currentStep == deg135: hStr = hStr+sep+'%d'%xPosFile+sep+'%d'%yPosFile #writeToFile ('XPosition = %d'%xPosition + ' YPosition = %d'%yPosition) currentStep 
= currentStep + 1 if currentStep == experimentDone: print("experimentProcedure -> experimentDone (iteration)") currentLoop = currentLoop +1 print ('currentEye:',currentEye,', currentLoop',currentLoop) #prepara inici del seguent loop currentStep = deg90 maxKeysPressed = -1*yDotPosition-1 if currentLoop ==0: numberKeysPressed = 0 else: numberKeysPressed = maxKeysPressed # si es l'ultima iteracio de l'ull actual if currentLoop == numberOfLoops: if ((currentEye == eyeRight and applyEyeLeft != 1 and applyEyeBoth != 1) or (currentEye == eyeLeft and applyEyeBoth != 1) or (currentEye == eyeBoth)): # final experiment messagebox.showinfo("Perimetria", "Experiment done") # escriu resultat de cada eix i cada iteracio writeCoords2Disk() # tancament arxiu writeToFile('Experiment Done at '+ datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) # copiem arxiu per poder fer la mascara des de movilab resultsFilename_mask = "..\Resultats\CampVisual_Resultats.txt" shutil.copy(resultsFilename, resultsFilename_mask) return elif (currentEye==eyeRight and applyEyeLeft==1): # escriu resultat de cada eix i cada iteracio writeCoords2Disk() # prepara per seguent ull clearCoords() writeToFile('Eye'+sep+'Left'+sep+sep+sep) writeToFile('Direction'+sep+'Col1'+sep+'Row1'+sep+'Col2'+sep+'Row2') currentEye=eyeLeft currentLoop=0 messagebox.showinfo("Perimetria", "Eye: Right") elif ((currentEye==eyeRight and applyEyeBoth==1) or (currentEye==eyeLeft and applyEyeBoth==1)): # escriu resultat de cada eix i cada iteracio writeCoords2Disk() # prepara per seguent ull clearCoords() writeToFile('Eye'+sep+'Both'+sep+sep+sep) writeToFile('Direction'+sep+'Col1'+sep+'Row1'+sep+'Col2'+sep+'Row2') currentEye=eyeBoth currentLoop=0 messagebox.showinfo("Perimetria", "Eye: Both") else: print("experimentProcedure -> next step (axis)") # num keys presseed ha de ser zero per tal que el 1r loop vagi de dins a fora if currentLoop ==0: numberKeysPressed = 0 else: numberKeysPressed = 0 if currentStep == deg90: maxKeysPressed = -1*yDotPosition-1 elif currentStep == deg45: maxKeysPressed = -1*yDotPosition-1 elif currentStep == deg0: maxKeysPressed = -1*xDotPosition-1 elif currentStep == deg315: maxKeysPressed = -1*yDotPosition-1 elif currentStep == deg270: maxKeysPressed = -1*yDotPosition-1 elif currentStep == deg225: maxKeysPressed = -1*yDotPosition-1 elif currentStep == deg180: maxKeysPressed = -1*xDotPosition-1 elif currentStep == deg135: maxKeysPressed = -1*yDotPosition-1 numberKeysPressed = maxKeysPressed #Generem GUI master = Tk() master.title('Experiment: Perimetria') master.bind_all('<KeyPress>', kp) canvas_width = wMonitorRes canvas_height = hMonitorRes w = Canvas(master, width=canvas_width, height=canvas_height, bg=backgroundColor) w.pack() dotSizeNumber = int(dotSize) #Dibuixa punt central xLine = wMonitorRes / dotSizeNumber yLine = hMonitorRes / dotSizeNumber cursorRect (w,int(xLine/2)*dotSizeNumber, int(yLine/2)*dotSizeNumber, dotSizeNumber, 2) xDotCenter = int(xLine/2)*dotSizeNumber+(dotSizeNumber/2) yDotCenter = int(yLine/2)*dotSizeNumber+(dotSizeNumber/2) xDotPosition = (int)(xLine/2) yDotPosition = (int)(yLine/2) #Dibuixa grid checkered(w,dotSizeNumber) #valor inicial per 90deg maxKeysPressed = -1*yDotPosition-1 #Initial message strs='Left' if currentEye==eyeRight: strs='Right' elif currentEye==eyeBoth: strs ='Both' #Escriu capçalera a l'arxiu de resultats messagebox.showinfo("Perimetria", 'Eye:'+strs) writeToFile('Eye'+sep+strs+sep+sep+sep+sep+sep) writeToFile('Direction'+sep+'Col1'+sep+'Row1'+sep+'Col2'+sep+'Row2') mainloop()
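
# Illustrative sketch (pure Python, independent of tkinter): the script above
# places the stimulus at pixel = cell_index * dot_size and logs results as cell
# offsets from the central dot (position / dot_size - center_cell). The numbers
# below are hypothetical, just to show the conversion both ways.
def cell_to_pixel(cell, dot_size):
    return cell * dot_size


def pixel_to_offset(pixel, dot_size, center_cell):
    return int(pixel / dot_size) - center_cell


def _grid_conversion_demo():
    # Example: 1920x1080 screen with 40 px dots -> 48 columns, center column 24.
    # A dot drawn at column 30 sits at pixel 1200 and is logged as offset +6.
    assert cell_to_pixel(30, 40) == 1200
    assert pixel_to_offset(1200, 40, 24) == 6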
the-stack_0_8254
from .annospan import AnnoSpan, SpanGroup from .utils import flatten, merge_dicts class MetaSpan(AnnoSpan): def __init__(self, span=None, start=None, end=None, doc=None, metadata={}): if span is None: self.start = start self.end = end self.doc = doc elif isinstance(span, AnnoSpan): self.start = span.start self.end = span.end self.doc = span.doc else: # We assume that span is a spaCy token self.start = span.idx self.end = span.idx + len(span) self.token = span self.doc = doc self.label = self.text self._metadata = span.metadata if isinstance(span, MetaSpan) else metadata def __repr__(self): return "MetaSpan(start={}, end={}, doc={}, metadata={})".format(self.start, self.end, self.doc, self.metadata) def __str__(self): return "{}-{}: '{}' {}".format(self.start, self.end, self.text, self.metadata) def to_dict(self): result = super(MetaSpan, self).to_dict() result.update(self.metadata) result['text'] = self.text return result @property def metadata(self): return self._metadata @metadata.setter def metadata(self, value): self._metadata = value @metadata.deleter def metadata(self): del self._metadata def update_metadata(self, metagen, *args, **kwargs): result = metagen.generate(self, *args, **kwargs) if isinstance(result, dict): self._metadata = merge_dicts([result, self._metadata], unique=True) return self.metadata @property def tokens(self): tokens_tier = self.doc.tiers["spacy.tokens"] tokens = [t.token for t in tokens_tier.spans_contained_by_span(self)] return(tokens) class MetaGroup(MetaSpan, SpanGroup): def __init__(self, base_spans, label=None): assert isinstance(base_spans, list) assert len(base_spans) > 0 self.base_spans = [MetaSpan(span) for span in base_spans] self.doc = base_spans[0].doc self._label = label self._metadata = {} def __repr__(self): return "MetaGroup(start={}, end={}, doc={}, metadata={})".format(self.start, self.end, self.doc, self.metadata) def __str__(self): text = "merged text and metadata:\n {}-{}: '{}'\n {}".format(self.start, self.end, self.text, self.metadata) text += "\ngroup metadata:\n {}".format(self._metadata) text += "\nbase text and metadata:" for span in self.iterate_base_spans(): text += "\n {}-{}: '{}' {}".format(span.start, span.end, span.text, span.metadata) return(text) def __iter__(self): return(iter(flatten(self.base_spans))) # def __next__(self): # for span in flatten(self.base_spans): # return span # raise StopIteration @property def start(self): return(min([s.start for s in self.base_spans])) @property def end(self): return(max([s.end for s in self.base_spans])) @property def text(self): return self.doc.text[self.start:self.end] @property def label(self): if self._label is None: return(self.text) else: return(self._label) @property def metadata(self, **kwargs): metadata_list = [self._metadata] + [s.metadata for s in self.iterate_base_spans()] metadata = merge_dicts(metadata_list, unique=True, **kwargs) return(metadata) def update_group_metadata(self, metagen, *args, **kwargs): result = metagen.generate(self, *args, **kwargs) if isinstance(result, dict): self._metadata = merge_dicts([result, self._metadata], unique=True) return self.metadata def update_base_span_metadata(self, metagen, *args, **kwargs): for span in self.iterate_base_spans(): span.update_metadata(metagen, *args, **kwargs) return self.metadata # I could be convinced that either way is better on this. 
def update_metadata(self, metagen, *args, **kwargs): self.update_base_span_metadata(metagen, *args, **kwargs) # self.update_group_metadata(metagen, *args, **kwargs) @property def tokens(self): tokens_tier = self.doc.tiers["spacy.tokens"] tokens = [] for span in self.iterate_base_spans(): tokens.append([t.token for t in tokens_tier.spans_contained_by_span(span)]) tokens = flatten(tokens) return(tokens) def append(self, spans): if isinstance(spans, AnnoSpan): self.base_spans.append(spans) elif isinstance(spans, list): self.base_spans.extend(spans)
the-stack_0_8255
from conans import ConanFile, CMake class ValuePtrLiteConan(ConanFile): version = "0.2.1" name = "value-ptr-lite" description = "A C++ smart-pointer with value semantics for C++98, C++11 and later" license = "Boost Software License - Version 1.0. http://www.boost.org/LICENSE_1_0.txt" url = "https://github.com/martinmoene/value-ptr-lite.git" exports_sources = "include/nonstd/*", "CMakeLists.txt", "cmake/*", "LICENSE.txt" settings = "compiler", "build_type", "arch" build_policy = "missing" author = "Martin Moene" def build(self): """Avoid warning on build step""" pass def package(self): """Run CMake install""" cmake = CMake(self) cmake.definitions["VALUE_PTR_LITE_OPT_BUILD_TESTS"] = "OFF" cmake.definitions["VALUE_PTR_LITE_OPT_BUILD_EXAMPLES"] = "OFF" cmake.configure() cmake.install() def package_info(self): self.info.header_only()
the-stack_0_8257
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import pandas as pd import requests UTAHAQ_API_BASE_URI = 'http://meso2.chpc.utah.edu/aq/cgi-bin/download_mobile_archive.cgi' UTAHAQ_API_TOKEN = os.getenv('UTAHAQ_API_TOKEN') def _utahaq_batch_get(stid: str, yr: int, mo: int, datatype: str) -> pd.DataFrame: """Queries UtahAQ API endpoint for single month of data For API reference, see http://utahaq.chpc.utah.edu/aq/cgi-bin/mobile_archive.cgi Args: stid: unique station identifier yr: year desired mo: month desired datatype: measurement dataset identifier, see reference Returns: pd.DataFrame: flattened time, stid, lat/lon, and relevant readings """ yr = str(yr).zfill(4) mo = str(mo).zfill(2) stid = stid.upper() datatype = datatype.lower() uri = ( f'{UTAHAQ_API_BASE_URI}' f'?accesskey={UTAHAQ_API_TOKEN}' f'&stid={stid}' f'&yr={yr}' f'&mo={mo}' f'&datatype={datatype}' ) try: res = pd.read_csv(uri, skiprows=True) except pd.errors.EmptyDataError: return None res = res[res.esampler_error_code == 0] res.index = pd.to_datetime(res.Date + ' ' + res.TimeUTC, utc=True) res = res.rename(columns={ 'esampler_pm25_ugm3': 'pm25_ugm3', 'esampler_rh_pcent': 'rh_pct' }) return res[['pm25_ugm3', 'rh_pct']] def utahaq_api_get(stid: list, start: pd.Timestamp, end: pd.Timestamp, datatype: list) -> pd.DataFrame: """Returns `pd.DataFrame` containing observations For API reference, see http://utahaq.chpc.utah.edu/aq/cgi-bin/mobile_archive.cgi Args: stid: unique station identifier start: start timestamp for returned data end: end timestamp for returned data datatype: measurement dataset identifier, see reference Returns: pd.DataFrame: flattened time, stid, lat/lon, and relevant readings Examples: >>> utahaq_api_get( 'hawth', pd.Timestamp('2019-01-02 00:00:00'), pd.Timestamp('2019-01-02 00:00:30'), 'pm' ) pm25_ugm3 rh_pct 2019-01-02 00:00:00+00:00 3.0 28.0 2019-01-02 00:00:10+00:00 3.0 28.0 2019-01-02 00:00:20+00:00 3.0 28.0 2019-01-02 00:00:30+00:00 2.0 28.0 """ query_dates = pd.date_range(start=start, end=end, freq='MS') if len(query_dates) == 0: query_dates = [start] df_list = [] for date in query_dates: df_list.append( _utahaq_batch_get( stid=stid, yr=date.year, mo=date.month, datatype=datatype ) ) df = pd.concat(df_list) return df[(df.index >= start) & (df.index <= end)]
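
# Hedged usage sketch: the helpers above return a 10-second resolution DataFrame
# indexed by UTC timestamps, so downstream aggregation is plain pandas. Running
# this requires a valid UTAHAQ_API_TOKEN in the environment plus network access;
# the station id matches the docstring example and the dates are arbitrary.
def hourly_pm25(stid='hawth',
                start=pd.Timestamp('2019-01-02 00:00:00', tz='UTC'),
                end=pd.Timestamp('2019-01-03 00:00:00', tz='UTC')):
    df = utahaq_api_get(stid=stid, start=start, end=end, datatype='pm')
    return df['pm25_ugm3'].resample('1H').mean()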
the-stack_0_8258
#!/usr/bin/env python3 # Copyright (c) 2013-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Generate seeds.txt from Pieter's DNS seeder # NSEEDS = 512 MAX_SEEDS_PER_ASN = 2 MIN_BLOCKS = 337600 # These are hosts that have been observed to be behaving strangely (e.g. # aggressively connecting to every node). SUSPICIOUS_HOSTS = { "130.211.129.106", "178.63.107.226", "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6", "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211", "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214", "54.94.195.96", "54.94.200.247" } import re import sys import dns.resolver import collections PATTERN_IPV4 = re.compile( r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$") PATTERN_ONION = re.compile( r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$") # Used to only select nodes with a user agent string compatible with the # BCC/UAHF specification. PATTERN_AGENT = re.compile( r"^(/BitcoinABC:0.15.(\d+)\(\S+\)/|/BitcoinXT:0.11.0G\(\S+\)/|/BUCash:1.1.(\d+)\(\S+\)/|/Classic:1.3.(\d+)\(\S+\)/)") def parseline(line): sline = line.split() if len(sline) < 11: return None # All BCC clients apart BU and Classic has a space in the useragent string if len(sline) == 13: sline[11] = sline[11] + sline[12] if len(sline) == 14: sline[11] = sline[11] + sline[12] + sline[13] m = PATTERN_IPV4.match(sline[0]) sortkey = None ip = None if m is None: m = PATTERN_IPV6.match(sline[0]) if m is None: m = PATTERN_ONION.match(sline[0]) if m is None: return None else: net = 'onion' ipstr = sortkey = m.group(1) port = int(m.group(2)) else: net = 'ipv6' if m.group(1) in ['::']: # Not interested in localhost return None ipstr = m.group(1) sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds port = int(m.group(2)) else: # Do IPv4 sanity check ip = 0 for i in range(0, 4): if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255: return None ip = ip + (int(m.group(i + 2)) << (8 * (3 - i))) if ip == 0: return None net = 'ipv4' sortkey = ip ipstr = m.group(1) port = int(m.group(6)) # Skip bad results. if sline[1] == 0: return None # Extract uptime %. uptime30 = float(sline[7][:-1]) # Extract Unix timestamp of last success. lastsuccess = int(sline[2]) # Extract protocol version. version = int(sline[10]) # Extract user agent. agent = sline[11][1:-1] # Extract service flags. service = int(sline[9], 16) # Extract blocks. blocks = int(sline[8]) # Construct result. 
return { 'net': net, 'ip': ipstr, 'port': port, 'ipnum': ip, 'uptime': uptime30, 'lastsuccess': lastsuccess, 'version': version, 'agent': agent, 'service': service, 'blocks': blocks, 'sortkey': sortkey, } def filtermultiport(ips): '''Filter out hosts with more nodes per IP''' hist = collections.defaultdict(list) for ip in ips: hist[ip['sortkey']].append(ip) return [value[0] for (key, value) in list(hist.items()) if len(value) == 1] # Based on Greg Maxwell's seed_filter.py def filterbyasn(ips, max_per_asn, max_total): # Sift out ips by type ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4'] ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6'] ips_onion = [ip for ip in ips if ip['net'] == 'onion'] # Filter IPv4 by ASN result = [] asn_count = {} for ip in ips_ipv4: if len(result) == max_total: break try: asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) if asn not in asn_count: asn_count[asn] = 0 if asn_count[asn] == max_per_asn: continue asn_count[asn] += 1 result.append(ip) except: sys.stderr.write( 'ERR: Could not resolve ASN for "' + ip['ip'] + '"\n') # TODO: filter IPv6 by ASN # Add back non-IPv4 result.extend(ips_ipv6) result.extend(ips_onion) return result def main(): lines = sys.stdin.readlines() ips = [parseline(line) for line in lines] # Skip entries with valid address. ips = [ip for ip in ips if ip is not None] # Skip entries from suspicious hosts. ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS] # Enforce minimal number of blocks. ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] # Require service bit 1. ips = [ip for ip in ips if (ip['service'] & 1) == 1] # Require at least 50% 30-day uptime. # TODO set it back to 50% once nodes will have enough uptime. ips = [ip for ip in ips if ip['uptime'] > 0] # Require a known and recent user agent. ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])] # Sort by availability (and use last success as tie breaker) ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) # Filter out hosts with multiple bitcoin ports, these are likely abusive ips = filtermultiport(ips) # Look up ASNs and limit results, both per ASN and globally. # TODO during this bootstrap phase we need any BCC full nodes # active on the network, uncomment the following line once the # BCC chain will be consolidated. # ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) # Sort the results by IP address (for deterministic output). ips.sort(key=lambda x: (x['net'], x['sortkey'])) for ip in ips: if ip['net'] == 'ipv6': print('[%s]:%i' % (ip['ip'], ip['port'])) else: print('%s:%i' % (ip['ip'], ip['port'])) if __name__ == '__main__': main()
the-stack_0_8259
from ..db import * from .. import currentUser from ..accounting import UsageStatistics from ..lib import logging from ..lib.error import UserError from ..generic import * class Organization(Entity, BaseDocument): name = StringField(unique=True, required=True) totalUsage = ReferenceField(UsageStatistics, db_field='total_usage', required=True, reverse_delete_rule=DENY) label = StringField(required=True) homepageUrl = URLField(db_field='homepage_url') imageUrl = URLField(db_field='image_url') description = StringField() meta = { 'ordering': ['name'], 'indexes': [ 'name' ] } @property def sites(self): from .site import Site return Site.objects(organization=self) @property def users(self): from ..auth import User return User.objects(organization=self) def init(self, attrs): self.totalUsage = UsageStatistics().save() self.modify(attrs) def checkPermissions(self, *args, **kwargs): user = currentUser() if user.hasFlag(Flags.GlobalAdmin): return True if user.hasFlag(Flags.OrgaAdmin) and user.organization == self: return True return False def _checkRemove(self): UserError.check(self.checkPermissions(), code=UserError.DENIED, message="Not enough permissions") if self.id: UserError.check(not self.sites, code=UserError.NOT_EMPTY, message="Organization still has sites") UserError.check(not self.users, code=UserError.NOT_EMPTY, message="Organization still has users") def _remove(self): logging.logMessage("remove", category="organization", name=self.name) if self.id: self.delete() self.totalUsage.remove() def updateUsage(self): self.totalUsage.updateFrom([user.totalUsage for user in self.users]) def __str__(self): return self.name def __repr__(self): return "Organization(%s)" % self.name @classmethod def get(cls, name, **kwargs): try: return Organization.objects.get(name=name, **kwargs) except Organization.DoesNotExist: return None @classmethod def create(cls, name, label="", attrs=None): if not attrs: attrs = {} UserError.check(currentUser().hasFlag(Flags.GlobalAdmin), code=UserError.DENIED, message="Not enough permissions") UserError.check('/' not in name, code=UserError.INVALID_VALUE, message="Organization name may not include a '/'") logging.logMessage("create", category="site", name=name, label=label) organization = Organization(name=name, label=label) try: attrs_ = attrs.copy() attrs_['name'] = name attrs_['label'] = label organization.init(attrs_) organization.save() except: organization.remove() raise return organization ACTIONS = { Entity.REMOVE_ACTION: Action(fn=_remove, check=_checkRemove) } ATTRIBUTES = { "name": Attribute(field=name, check=checkPermissions, schema=schema.Identifier()), "label": Attribute(field=label, check=checkPermissions, schema=schema.String()), "homepage_url": Attribute(field=homepageUrl, check=checkPermissions, schema=schema.URL(null=True)), "image_url": Attribute(field=imageUrl, check=checkPermissions, schema=schema.URL(null=True)), "description": Attribute(field=description, check=checkPermissions, schema=schema.String()) } from ..auth import Flags
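# Illustrative sketch (not part of the original module): creating and looking up
# an organization from application code. Assumes a request context in which
# currentUser() returns a user carrying the GlobalAdmin flag, since create()
# checks that permission; the name and attributes below are placeholders.
def _example_create_organization():
	Organization.create(
		name="example_org",
		label="Example Organization",
		attrs={"description": "created from the usage sketch"},
	)
	return Organization.get("example_org")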
the-stack_0_8260
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This file performs Sling based entity linking on NQ.

The file iterates through the entire train and dev set of NQ. For every example
it does entity linking on long answer candidates, the annotated long and short
answers and the question. Every paragraph in the dataset is augmented with an
entity map from every token to its entity id.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import json
import os

import sling
import sling.flags as flags
import sling.task.entity as entity
import sling.task.workflow as workflow
import tensorflow.compat.v1 as tf

# Calling these 'args' to avoid conflicts with sling flags
args = tf.flags
ARGS = args.FLAGS

args.DEFINE_string("nq_dir", "", "NQ data location")
args.DEFINE_string("files_dir", "", "Preprocess files location")
args.DEFINE_string("output_data_dir", "", "Location to write augmented data to")
args.DEFINE_boolean("annotate_candidates", True, "Flag to annotate candidates")
args.DEFINE_boolean("annotate_long_answers", True,
                    "Flag to annotate long answer")
args.DEFINE_boolean("annotate_short_answers", True,
                    "Flag to annotate short answers")
args.DEFINE_boolean("annotate_question", True, "Flag to annotate questions")


def extract_and_tokenize_text(item, tokens):
  """Extracts the tokens in passage, tokenizes them using sling tokenizer."""
  start_token = item["start_token"]
  end_token = item["end_token"]
  if start_token >= 0 and end_token >= 0:
    non_html_tokens = [
        x for x in tokens[start_token:end_token] if not x["html_token"]
    ]
    answer = " ".join([x["token"] for x in non_html_tokens])
    answer_map = [idx for idx, x in enumerate(non_html_tokens)]
    doc = sling.tokenize(str(answer))
    return answer, answer_map, doc
  return "", [], None


def is_sling_entity(item):
  # Frames linked to Wikidata carry an id of the form "Q...".
  return (isinstance(item[0], sling.Frame) and "id" in item[0] and
          item[0]["id"].startswith("Q"))


def prepare_sling_input_corpus(nq_data, sling_input_corpus):
  """Parse each paragraph in NQ (LA candidate, LA, SA, question).

  Prepare a sling corpus to do entity linking.
Args: nq_data: A python dictionary containint NQ data of 1 train/dev shard sling_input_corpus: A filename string to write the sling format documents into """ corpus = sling.RecordWriter(sling_input_corpus) for i in nq_data.keys(): tokens = nq_data[i]["document_tokens"] if ARGS.annotate_candidates: for idx, la_cand in enumerate(nq_data[i]["long_answer_candidates"]): answer, answer_map, doc = extract_and_tokenize_text(la_cand, tokens) if answer: nq_data[i]["long_answer_candidates"][idx]["text_answer"] = answer nq_data[i]["long_answer_candidates"][idx]["answer_map"] = answer_map key = i + "|candidate|" + str(idx) + "|i" corpus.write(key, doc.frame.data(binary=True)) if ARGS.annotate_short_answers: for idx, ann in enumerate(nq_data[i]["annotations"]): short_ans = ann["short_answers"] if not short_ans: continue for sid in range(len(short_ans)): ans = short_ans[sid] answer, answer_map, doc = extract_and_tokenize_text(ans, tokens) if answer: nq_data[i]["annotations"][idx]["short_answers"][sid][ "text_answer"] = answer nq_data[i]["annotations"][idx]["short_answers"][sid][ "answer_map"] = answer_map key = i + "|annotated_short_answer|" + str(idx) + "|" + str(sid) corpus.write(key, doc.frame.data(binary=True)) if ARGS.annotate_long_answers: for idx, ann in enumerate(nq_data[i]["annotations"]): long_ans = ann["long_answer"] answer, answer_map, doc = extract_and_tokenize_text(long_ans, tokens) if answer: nq_data[i]["annotations"][idx]["long_answer"]["text_answer"] = answer nq_data[i]["annotations"][idx]["long_answer"][ "answer_map"] = answer_map key = i + "|annotated_long_answer|" + str(idx) + "|i" corpus.write(key, doc.frame.data(binary=True)) if ARGS.annotate_question: doc = sling.tokenize(str(nq_data[i]["question_text"])) key = i + "|question|i|i" corpus.write(key, doc.frame.data(binary=True)) corpus.close() def sling_entity_link(sling_input_corpus, sling_output_corpus): """Does sling entity linking and created linked output corpus.""" labeler = entity.EntityWorkflow("wiki-label") unannotated = labeler.wf.resource( sling_input_corpus, format="records/document") annotated = labeler.wf.resource( sling_output_corpus, format="records/document") labeler.label_documents(indocs=unannotated, outdocs=annotated) workflow.run(labeler.wf) def extract_entity_mentions(nq_data, labelled_record): """Parse ourput corpus and create map from tokens to entity ids. 
Args: nq_data: A python dictionary containint NQ data of 1 train/dev shard labelled_record: Sling output document with labelled paragraphs Returns: nq_data: Original object augmented with entity maps """ recin = sling.RecordReader(labelled_record) commons = sling.Store() docschema = sling.DocumentSchema(commons) commons.freeze() cnt = 1 for key, value in recin: store = sling.Store(commons) doc = sling.Document(store.parse(value), store, docschema) index, ans_type, idx, ans_id = key.decode("utf-8").split("|") cnt += 1 entity_map = {} # Parse entity mentions labelled by sling for m in doc.mentions: e = [i["is"] for i in m.evokes()] if not e: continue if is_sling_entity(e): e_val = e[0]["id"] if m.begin in entity_map: entity_map[m.begin].append((m.end, e_val)) else: entity_map[m.begin] = [(m.end, e_val)] if ans_type == "annotated_long_answer": nq_data[index]["annotations"][int( idx)]["long_answer"]["entity_map"] = entity_map elif ans_type == "question": nq_data[index]["question_entity_map"] = entity_map elif ans_type == "annotated_short_answer": nq_data[index]["annotations"][int(idx)]["short_answers"][int( ans_id)]["entity_map"] = entity_map else: nq_data[index]["long_answer_candidates"][int( idx)]["entity_map"] = entity_map return nq_data def extract_nq_data(nq_file): """Read nq shard file and return dict of nq_data.""" fp = gzip.GzipFile(fileobj=tf.gfile.Open(nq_file, "rb")) lines = fp.readlines() data = {} counter = 0 for line in lines: data[str(counter)] = json.loads(line.decode("utf-8")) tok = [] for j in data[str(counter)]["document_tokens"]: tok.append(j["token"]) data[str(counter)]["full_document_long"] = " ".join(tok) counter += 1 return data def get_shard(mode, task_id, shard_id): return "nq-%s-%02d%02d" % (mode, task_id, shard_id) def get_full_filename(data_dir, mode, task_id, shard_id): return os.path.join( data_dir, "%s/%s.jsonl.gz" % (mode, get_shard(mode, task_id, shard_id))) def get_examples(data_dir, mode, task_id, shard_id): """Reads NQ data, does sling entity linking and returns augmented data.""" file_path = get_full_filename(data_dir, mode, task_id, shard_id) tf.logging.info("Reading file: %d" % (file_path)) if not os.path.exists(file_path): return None nq_data = extract_nq_data(file_path) tf.logging.info("NQ data Size: " + str(len(nq_data.keys()))) tf.logging.info("Preparing sling corpus: ") sling_input_corpus = os.path.join(ARGS.files_dir, "sling_input_corpus.rec") sling_output_corpus = os.path.join(ARGS.files_dir, "nq_labelled_output.rec") prepare_sling_input_corpus(nq_data, sling_input_corpus) tf.logging.info("Performing Sling NER Labeling") sling_entity_link(sling_input_corpus, sling_output_corpus) fact_extracted_data = extract_entity_mentions(nq_data, sling_output_corpus) return fact_extracted_data def main(_): workflow.startup() max_tasks = {"train": 50, "dev": 5} max_shards = {"train": 6, "dev": 16} for mode in ["train", "dev"]: # Parse all shards in each mode # Currently sequentially, can be parallelized later for task_id in range(0, max_tasks[mode]): for shard_id in range(0, max_shards[mode]): nq_augmented_data = get_examples(ARGS.nq_dir, mode, task_id, shard_id) if nq_augmented_data is None: continue path = get_full_filename(ARGS.output_data_dir, mode, task_id, shard_id) with gzip.GzipFile(fileobj=tf.gfile.Open(path, "w")) as output_file: for idx in nq_augmented_data.keys(): json_line = nq_augmented_data[idx] output_file.write(json.dumps(json_line) + "\n") workflow.shutdown() if __name__ == "__main__": # This will fail if non-sling CMDLine Args are given. 
# Will modify sling separately to parse known args flags.parse() tf.app.run()
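# Example invocation (illustrative; the script name and paths are placeholders):
#
#   python nq_sling_entity_linking.py \
#       --nq_dir=/path/to/natural_questions/v1.0 \
#       --files_dir=/tmp/sling_files \
#       --output_data_dir=/path/to/augmented_nq
#
# All behaviour is controlled by the flags defined at the top of the file; the
# annotate_* booleans select which parts of each example receive entity maps.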
the-stack_0_8263
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @trojanzhex from pyrogram import filters from pyrogram import Client as trojanz from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton from config import Config from script import Script from helpers.progress import PRGRS from helpers.tools import clean_up from helpers.download import download_file, DATA from helpers.ffmpeg import extract_audio, extract_subtitle @trojanz.on_callback_query() async def cb_handler(client, query): if query.data == "start_data": await query.answer() keyboard = InlineKeyboardMarkup([ [InlineKeyboardButton("HELP", callback_data="help_data"), InlineKeyboardButton("ABOUT", callback_data="about_data")], [InlineKeyboardButton("⭕️ JOIN OUR CHANNEL ⭕️", url="https://t.me/TroJanzHEX")] ]) await query.message.edit_text( Script.START_MSG.format(query.from_user.mention), reply_markup=keyboard, disable_web_page_preview=True ) return elif query.data == "help_data": await query.answer() keyboard = InlineKeyboardMarkup([ [InlineKeyboardButton("BACK", callback_data="start_data"), InlineKeyboardButton("ABOUT", callback_data="about_data")], [InlineKeyboardButton("⭕️ SUPPORT ⭕️", url="https://t.me/TroJanzSupport")] ]) await query.message.edit_text( Script.HELP_MSG, reply_markup=keyboard, disable_web_page_preview=True ) return elif query.data == "about_data": await query.answer() keyboard = InlineKeyboardMarkup([ [InlineKeyboardButton("BACK", callback_data="help_data"), InlineKeyboardButton("START", callback_data="start_data")], [InlineKeyboardButton("SOURCE CODE", url="https://github.com/TroJanzHEX/Streams-Extractor")] ]) await query.message.edit_text( Script.ABOUT_MSG, reply_markup=keyboard, disable_web_page_preview=True ) return elif query.data == "download_file": await query.answer() await query.message.delete() await download_file(client, query.message) elif query.data == "progress_msg": try: msg = "Progress Details...\n\nCompleted : {current}\nTotal Size : {total}\nSpeed : {speed}\nProgress : {progress:.2f}%\nETA: {eta}" await query.answer( msg.format( **PRGRS[f"{query.message.chat.id}_{query.message.message_id}"] ), show_alert=True ) except: await query.answer( "Processing your file...", show_alert=True ) elif query.data == "close": await query.message.delete() await query.answer( "Cancelled...", show_alert=True ) elif query.data.startswith('audio'): await query.answer() try: stream_type, mapping, keyword = query.data.split('_') data = DATA[keyword][int(mapping)] await extract_audio(client, query.message, data) except: await query.message.edit_text("**Details Not Found**") elif query.data.startswith('subtitle'): await query.answer() try: stream_type, mapping, keyword = query.data.split('_') data = DATA[keyword][int(mapping)] await extract_subtitle(client, query.message, data) except: await query.message.edit_text("**Details Not Found**") elif query.data.startswith('cancel'): try: query_type, mapping, keyword = query.data.split('_') data = DATA[keyword][int(mapping)] await clean_up(data['location']) await query.message.edit_text("**Cancelled...**") await query.answer( "Cancelled...", show_alert=True ) except: await query.answer() await query.message.edit_text("**Details Not Found**")
the-stack_0_8264
"""!
This file contains some pair potentials.

\ingroup lammpstools
"""

import lammpstools
import dumpreader
import numpy as np
import math
import sys


def make_pair_table( fname, name, pair_pot, N, mode = "R", lo = 1.0, hi = 10.0 ):
    "Dumps a LAMMPS-style pair table to given file."

    if mode == "R":
        dr = (hi - lo)/(N-1)
    elif mode == "RSQ":
        print >> sys.stderr, "Mode RSQ not supported!"
        return -1
    elif mode == "BITMAP":
        print >> sys.stderr, "Mode BITMAP not supported!"
        return -1
    else:
        print >> sys.stderr, "Mode ", mode, " not recognized!"
        return -1

    # First test the given potential:
    if lammpstools.test_potential( pair_pot, lo, hi, 1e-4, 1e-8 ):
        print >> sys.stderr, "Potential not consistent!"
        # return -2

    # Fill table:
    fp = open(fname,"w")
    use_fprime = False
    if hasattr( pair_pot, "force_prime" ):
        use_fprime = True

    print >> fp, name
    if use_fprime:
        # The potential provides the force derivative, so include the FPRIME
        # hint in the table header.
        fplo = pair_pot.force_prime(lo)
        fphi = pair_pot.force_prime(hi)
        print >> fp, "N %d %s %e %e FPRIME %f %f" % (N, mode, lo, hi, fplo, fphi)
    else:
        print >> fp, "N %d %s %f %f" % (N, mode, lo, hi)
    print >> fp, ""

    for i in range(0,N):
        r = lo + i*dr
        E = pair_pot.energy(r)
        f = pair_pot.force(r)
        print >> fp, "%d %e %e %e" % (i,r,E,f)

    return 0
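# Usage sketch (illustrative, not part of the original module): a minimal
# 12-6 Lennard-Jones potential exposing the energy(r)/force(r)/force_prime(r)
# interface that make_pair_table expects.
class lj_potential:
    "12-6 Lennard-Jones potential."
    def __init__( self, epsilon = 1.0, sigma = 1.0 ):
        self.epsilon = epsilon
        self.sigma   = sigma

    def energy( self, r ):
        sr6 = (self.sigma/r)**6
        return 4.0*self.epsilon*( sr6*sr6 - sr6 )

    def force( self, r ):
        # F(r) = -dE/dr
        sr6 = (self.sigma/r)**6
        return 24.0*self.epsilon*( 2.0*sr6*sr6 - sr6 )/r

    def force_prime( self, r ):
        # dF/dr, used for the FPRIME hint in the table header
        sr6 = (self.sigma/r)**6
        return -24.0*self.epsilon*( 26.0*sr6*sr6 - 7.0*sr6 )/(r*r)

# Example call (commented out so importing this file has no side effects):
# make_pair_table( "lj.table", "LJ", lj_potential(), 1000, "R", 0.9, 3.0 )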
the-stack_0_8268
#=============================================================================== # Copyright 2009 Matt Chaput # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #=============================================================================== from collections import defaultdict from threading import Lock from whoosh.fields import UnknownFieldError from whoosh.index import Index from whoosh.ramdb.ramreading import RamIndexReader from whoosh.util import protected class RamIndex(Index): def __init__(self, schema): self.schema = schema self.docnum = 0 self._sync_lock = Lock() self.is_closed = False self.clear() def clear(self): # Maps fieldname -> a sorted list of term texts in that field self.termlists = defaultdict(list) # Maps fieldnames to dictionaries of term -> posting list self.invertedindex = {} for fieldname in self.schema.names(): self.invertedindex[fieldname] = defaultdict(list) # Maps terms -> index frequencies self.indexfreqs = defaultdict(int) # Maps docnum -> stored field dicts self.storedfields = {} # Maps (docnum, fieldname) -> field length self.fieldlengths = defaultdict(int) # Maps (docnum, fieldname) -> posting list self.vectors = {} # Contains docnums of deleted documents self.deleted = set() def close(self): del self.termlists del self.invertedindex del self.indexfreqs del self.storedfields del self.fieldlengths del self.vectors del self.deleted self.is_closed = True def doc_count_all(self): return len(self.storedfields) def doc_count(self): return len(self.storedfields) - len(self.deleted) def field_length(self, fieldname): return sum(l for docnum_fieldname, l in self.fieldlengths.iteritems() if docnum_fieldname[1] == fieldname) def max_field_length(self, fieldname): return max(l for docnum_fieldname, l in self.fieldlengths.iteritems() if docnum_fieldname[1] == fieldname) def reader(self): return RamIndexReader(self) def writer(self): return self @protected def add_field(self, *args, **kwargs): self.schema.add_field(*args, **kwargs) @protected def remove_field(self, fieldname): self.schema.remove_field(fieldname) if fieldname in self.termlists: del self.termlists[fieldname] for fn, text in self.indexfreqs.iterkeys(): if fn == fieldname: del self.indexfreqs[(fn, text)] for sfields in self.storedfields.itervalues(): if fieldname in sfields: del sfields[fieldname] for docnum, fn in self.fieldlengths.iterkeys(): if fn == fieldname: del self.fieldlengths[(docnum, fn)] if fieldname in self.fieldlength_maxes: del self.fieldlength_maxes[fieldname] for docnum, fn in self.vectors.iterkeys(): if fn == fieldname: del self.vectors[(docnum, fn)] @protected def delete_document(self, docnum, delete=True): if delete: self.deleted.add(docnum) else: self.deleted.remove(docnum) @protected def delete_by_term(self, fieldname, text): inv = self.invertedindex if fieldname in inv: terms = inv[fieldname] if text in terms: postings = terms[text] for p in postings: self.deleted.add(p[0]) @protected def delete_by_query(self, q, searcher=None): s = self.searcher() for docnum in q.docs(s): self.deleted.add(docnum) def 
has_deletions(self): return bool(self.deleted) @protected def optimize(self): deleted = self.deleted # Remove deleted documents from stored fields storedfields = self.storedfields for docnum in deleted: del storedfields[docnum] # Remove deleted documents from inverted index removedterms = defaultdict(set) for fieldname in self.schema.names(): inv = self.invertedindex[fieldname] for term, postlist in inv.iteritems(): inv[term] = [x for x in postlist if x[0] not in deleted] # Remove terms that no longer have any postings after the # documents are deleted for term in inv.keys(): if not inv[term]: removedterms[fieldname].add(term) del inv[term] # If terms were removed as a result of document deletion, # update termlists and indexfreqs termlists = self.termlists for fieldname, removed in removedterms.iteritems(): termlists[fieldname] = [t for t in termlists[fieldname] if t not in removed] for text in removed: del self.indexfreqs[(fieldname, text)] # Remove documents from field lengths fieldlengths = self.fieldlengths for docnum, fieldname in fieldlengths.keys(): if docnum in deleted: del fieldlengths[(docnum, fieldname)] # Remove documents from vectors vectors = self.vectors for docnum, fieldname in vectors.keys(): if docnum in deleted: del vectors[(docnum, fieldname)] # Reset deleted list self.deleted = set() @protected def add_document(self, **fields): schema = self.schema invertedindex = self.invertedindex indexfreqs = self.indexfreqs fieldlengths = self.fieldlengths fieldnames = [name for name in sorted(fields.keys()) if not name.startswith("_")] storedvalues = {} for name in fieldnames: if name not in schema: raise UnknownFieldError("There is no field named %r" % name) for name in fieldnames: value = fields.get(name) if value: field = schema[name] newwords = set() fielddict = invertedindex[name] # If the field is indexed, add the words in the value to the # index if field.indexed: # Count of all terms in the value count = 0 # Count of UNIQUE terms in the value unique = 0 for w, freq, weight, valuestring in field.index(value): if w not in fielddict: newwords.add(w) fielddict[w].append((self.docnum, weight, valuestring)) indexfreqs[(name, w)] += freq count += freq unique += 1 self.termlists[name] = sorted(set(self.termlists[name]) | newwords) if field.scorable: fieldlengths[(self.docnum, name)] = count vector = field.vector if vector: vlist = sorted((w, weight, valuestring) for w, freq, weight, valuestring in vector.word_values(value)) self.vectors[(self.docnum, name)] = vlist if field.stored: storedname = "_stored_" + name if storedname in fields: stored_value = fields[storedname] else : stored_value = value storedvalues[name] = stored_value self.storedfields[self.docnum] = storedvalues self.docnum += 1 @protected def add_reader(self, reader): startdoc = self.docnum has_deletions = reader.has_deletions() if has_deletions: docmap = {} fieldnames = set(self.schema.names()) for docnum in xrange(reader.doc_count_all()): if (not has_deletions) or (not reader.is_deleted(docnum)): d = dict(item for item in reader.stored_fields(docnum).iteritems() if item[0] in fieldnames) self.storedfields[self.docnum] = d if has_deletions: docmap[docnum] = self.docnum for fieldname, length in reader.doc_field_lengths(docnum): if fieldname in fieldnames: self.fieldlengths[(self.docnum, fieldname)] = length for fieldname in reader.vector_names(): if (fieldname in fieldnames and reader.has_vector(docnum, fieldname)): vpostreader = reader.vector(docnum, fieldname) self.vectors[(self.docnum, fieldname)] = 
list(vpostreader.all_items()) vpostreader.close() self.docnum += 1 for fieldname, text, _, _ in reader: if fieldname in fieldnames: postreader = reader.postings(fieldname, text) while postreader.is_active(): docnum = postreader.id() valuestring = postreader.value() weight = postreader.weight() if has_deletions: newdoc = docmap[docnum] else: newdoc = startdoc + docnum self.invertedindex[fieldname][text].append((newdoc, weight, valuestring)) postreader.next()
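# Usage sketch (illustrative, not part of the original module): building a small
# in-memory index and running a query against it. The field and query-parser
# imports are assumed to follow the usual whoosh package layout of this code base.
def _example_ram_index():
    from whoosh.fields import Schema, ID, TEXT
    from whoosh.qparser import QueryParser

    schema = Schema(path=ID(stored=True), body=TEXT)
    ix = RamIndex(schema)
    ix.add_document(path=u"/a", body=u"alpha bravo charlie")
    ix.add_document(path=u"/b", body=u"bravo delta")

    searcher = ix.searcher()
    query = QueryParser("body", schema=ix.schema).parse(u"bravo")
    return [hit["path"] for hit in searcher.search(query)]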
the-stack_0_8269
import requests
#import sys
from selenium import webdriver
import re
from bs4 import BeautifulSoup
#from bs4 import UnicodeDammit
#sys.stdout = codecs.getwriter("iso-8859-8")(sys.stdout, 'xmlcharrefreplace')
import os

project_dir = os.path.dirname(os.path.abspath(__file__))
#project_dir = project_dir.replace('\\','/')
#phantom_linuxdir = project_dir + '/phantom/linux/bin/phantomjs'
#phantom_linuxdir= '/app/getrw_tiki/phantom/linux/bin/phantomjs'
phantom_windir = project_dir + '/phantom/windows/bin/phantomjs'
#print phantom_linuxdir
#client = webdriver.PhantomJS(executable_path=r'/app/getrw_tiki/phantom/linux/bin/phantomjs')  ### crawler js
client = webdriver.PhantomJS()
#client = webdriver.PhantomJS(phantom_windir)  ### crawler js


class object:
    def __init__(self, lnkweb, comment, lnkImg):
        # comment is a string
        self.lnkweb = lnkweb
        self.comment = comment
        self.lnkImg = lnkImg

    def returnvalue(self):  # return the values of the object
        obj = list()  # create an empty list
        obj.append(self.lnkweb)  # append the attribute values to the list
        obj.append(self.comment)
        obj.append(self.lnkImg)
        return obj  # return the list holding the object's values


######### start lazada ######################
def get_comment(link):  # lazada
    r = requests.get(link)
    soup = BeautifulSoup(r.text, 'html.parser')
    fnd = soup.find_all("div", "review_criteria")
    #print len(fnd)
    c = list()
    if len(fnd) != 0:
        for i in fnd:
            comment = i.text
            c.append(comment)
    return c


def search_vatgia(keywords):
    k = keywords.replace(' ', '+')
    link = 'http://vatgia.com/home/quicksearch.php?keyword=' + k + '&sort=5'
    client.get(link)
    soup = BeautifulSoup(client.page_source, "html.parser")
    fclass = soup.find_all("a", "picture_link", limit=5)
    return fclass

'''
def get_src(input):  # get the src link from a lazada html element
    rexp = '(src=")(.*)"'  # get the link inside the span tag
    f = re.compile(str(rexp)).findall(str(input))
    if f == []:
        print "1"
        rexp = '(url\()(.*)\)'
        f = re.compile(str(rexp)).findall(str(input))
    return f[0][1]'''
#search_vatgia("iphone 5")
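# Usage sketch (illustrative, not part of the original script): search vatgia for
# a keyword and collect review comments from the returned product links. Note
# that get_comment() targets lazada's "review_criteria" markup, so it may simply
# return empty lists for other sites; vatgia hrefs may also be relative.
def crawl_reviews(keyword):
    reviews = []
    for a_tag in search_vatgia(keyword):
        link = a_tag.get('href')
        if link:
            reviews.extend(get_comment(link))
    return reviews

#crawl_reviews("iphone 5")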
the-stack_0_8270
import argparse import os import numpy as np import torch import torch.nn.functional as F from pil import Image from Network import UNet from utils import resize_and_crop, normalize, split_img_into_squares, hwc_to_chw, merge_masks from utils import plot_img_and_mask from torchvision import transforms def predict_img(net, full_img, scale_factor=0.25, out_threshold=0.5, use_dense_crf=True, use_gpu=True): net.eval() img_height = full_img.size[1] img_width = full_img.size[0] img = resize_and_crop(full_img, scale=scale_factor) img = normalize(img) left_square, right_square = split_img_into_squares(img) left_square = hwc_to_chw(left_square) right_square = hwc_to_chw(right_square) X_left = torch.from_numpy(left_square).unsqueeze(0) X_right = torch.from_numpy(right_square).unsqueeze(0) if use_gpu: X_left = X_left.cuda() X_right = X_right.cuda() with torch.no_grad(): output_left = net(X_left) output_right = net(X_right) left_probs = output_left.squeeze(0) right_probs = output_right.squeeze(0) tf = transforms.Compose( [ transforms.ToPILImage(), transforms.Resize(img_height), transforms.ToTensor() ] ) left_probs = tf(left_probs.cpu()) right_probs = tf(right_probs.cpu()) left_mask_np = left_probs.squeeze().cpu().numpy() right_mask_np = right_probs.squeeze().cpu().numpy() full_mask = merge_masks(left_mask_np, right_mask_np, img_width) # if use_dense_crf: # full_mask = dense_crf(np.array(full_img).astype(np.uint8), full_mask) # return full_mask > out_threshold def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--model', '-m', default='CP1.pth', metavar='FILE', help="Specify the file in which is stored the model" " (default : 'CP1.pth')") parser.add_argument('--input', '-i', metavar='INPUT', nargs='+', help='filenames of input images', required=True) parser.add_argument('--output', '-o', metavar='INPUT', nargs='+', help='filenames of ouput images') parser.add_argument('--cpu', '-c', action='store_true', help="Do not use the cuda version of the net", default=False) parser.add_argument('--viz', '-v', action='store_true', help="Visualize the images as they are processed", default=False) parser.add_argument('--no-save', '-n', action='store_true', help="Do not save the output masks", default=False) parser.add_argument('--no-crf', '-r', action='store_true', help="Do not use dense CRF postprocessing", default=False) parser.add_argument('--mask-threshold', '-t', type=float, help="Minimum probability value to consider a mask pixel white", default=0.5) parser.add_argument('--scale', '-s', type=float, help="Scale factor for the input images", default=0.25) return parser.parse_args() def get_output_filenames(args): in_files = args.input out_files = [] if not args.output: for f in in_files: pathsplit = os.path.splitext(f) out_files.append("{}_OUT{}".format(pathsplit[0], pathsplit[1])) elif len(in_files) != len(args.output): print("Error : Input files and output files are not of the same length") raise SystemExit() else: out_files = args.output return out_files def mask_to_image(mask): return Image.fromarray((mask * 255).astype(np.uint8)) if __name__ == "__main__": args = get_args() in_files = args.input out_files = get_output_filenames(args) net = UNet(n_channels=3, n_classes=1) print("Loading model {}".format(args.model)) if not args.cpu: print("Using CUDA version of the net, prepare your GPU !") net.cuda() net.load_state_dict(torch.load(args.model)) else: net.cpu() net.load_state_dict(torch.load(args.model, map_location='cpu')) print("Using CPU version of the net, this may be very slow") 
print("Model loaded !") for i, fn in enumerate(in_files): print("\nPredicting image {} ...".format(fn)) img = Image.open(fn) if img.size[0] < img.size[1]: print("Error: image height larger than the width") mask = predict_img(net=net, full_img=img, scale_factor=args.scale, out_threshold=args.mask_threshold, use_dense_crf=not args.no_crf, use_gpu=not args.cpu) print(mask) plot_img_and_mask(img, mask) exit(0) if args.viz: print("Visualizing results for image {}, close to continue ...".format(fn)) plot_img_and_mask(img, mask) if not args.no_save: out_fn = out_files[i] result = mask_to_image(mask) result.save(out_files[i]) print("Mask saved to {}".format(out_files[i]))
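# Programmatic usage sketch (illustrative, not part of the original script):
# running a single prediction without going through the CLI. The model and
# image paths are placeholders.
def _example_predict_single(model_path='CP1.pth', image_path='example.jpg'):
    net = UNet(n_channels=3, n_classes=1)
    net.cpu()
    net.load_state_dict(torch.load(model_path, map_location='cpu'))
    img = Image.open(image_path)
    mask = predict_img(net=net,
                       full_img=img,
                       scale_factor=0.25,
                       out_threshold=0.5,
                       use_dense_crf=False,
                       use_gpu=False)
    return mask_to_image(mask)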
the-stack_0_8273
import math, networkx as nx, timeit, unittest class ConnectTheDotsBigDataTest(unittest.TestCase): """ Benchmarking suite for ConnectTheDots (large datasets) """ def test_bc_runtime(self): """ Test time needed to calculate betweenness centrality """ TEST_CASES = [] # add (V, E) tuples NUM_TRIALS = 10 def generate_graph(V, E): """ Return a random Barabasi-Albert graph with V nodes and E edges """ m = (V - math.sqrt(V ** 2 - 4 * E)) / 2 return nx.barabasi_albert_graph(V, int(m)) def calculate_bc(G): """ Calculate betweenness centrality for graph G """ return nx.betweenness_centrality(G) if len(TEST_CASES) > 0: print('\n\n[ Runtime ]\n') for (V, E) in TEST_CASES: print('V = ' + str(V) + ', E = ' + str(E) + '\n') G = generate_graph(V, E) for i in range(NUM_TRIALS): start = timeit.default_timer() calculate_bc(G) stop = timeit.default_timer() print(stop - start) print('') def test_bc_estimation(self): """ Test accuracy of different k-values for betweenness centrality estimation """ TEST_CASES = [] # add (V, E, k) tuples NUM_TRIALS = 1 def generate_graph(V, E): """ Return a random Barabasi-Albert graph with V nodes and E edges """ m = (V - math.sqrt(V ** 2 - 4 * E)) / 2 return nx.barabasi_albert_graph(V, int(m)) def calculate_bc(G, k=None): """ Calculate betweenness centrality for graph G using k pivots """ return nx.betweenness_centrality(G, k) def round_float(n): """ Return string representation of float n rounded to six decimal places """ return '{:f}'.format(n) def error_pct(error, actual): """ Return string representation of error % of estimate from actual """ if actual > 0: return '{:.1%}'.format(error / actual) else: return '--' if len(TEST_CASES) > 0: print('\n\n[ Estimation ]\n') for (V, E, k) in TEST_CASES: print('V = ' + str(V) + ', E = ' + str(E) + ', k = ' + str(k) + '\n') G = generate_graph(V, E) bc = calculate_bc(G) for i in range(NUM_TRIALS): # estimate = calculate_bc(G, key) # print 'node estimate actual error % error' # print '---- -------- -------- -------- --------' # for key, val in estimate.iteritems(): # error = abs(bc[key] - val) # print ' '.join(['{:04}'.format(key), round_float(val), round_float(bc[key]), round_float(error), error_pct(error, bc[key])]) # print '' start = timeit.default_timer() estimate = calculate_bc(G, k) stop = timeit.default_timer() runtime = stop - start max_error = 0 max_error_pct = 0 for key, val in estimate.items(): error = abs(bc[key] - val) max_error = max(max_error, error) if bc[key] > 0: max_error_pct = max(max_error_pct, error / bc[key]) print(', ' .join([round_float(max_error), '{:.1%}'.format(max_error_pct), str(runtime)])) print('')
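# Illustrative note (not part of the original suite): both TEST_CASES lists
# above are empty, so the benchmarks are skipped by default. Populating them,
# e.g. with (V, E) = (1000, 5000) in test_bc_runtime and (V, E, k) =
# (1000, 5000, 100) in test_bc_estimation, exercises a 1000-node
# Barabasi-Albert graph and a 100-pivot betweenness estimate. A standard
# entry point so the suite can be run directly is sketched below.
if __name__ == '__main__':
    unittest.main()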
the-stack_0_8274
from datetime import datetime from io import BytesIO import os import shutil from behave import * from tsserver import configutils from tsserver.dtutils import datetime_to_str from tsserver.features.testutils import ( open_resource, resource_path, table_to_database ) from tsserver.photos.models import Photo PHOTO_DETAIL_KEYS = {'id', 'filename', 'isPanorama', 'url', 'timestamp'} @given("test photos in upload directory") def step_impl(context): src = os.path.join(resource_path(), 'deathvalley.jpg') uploads = configutils.get_upload_dir() for filename in {'test001.jpg', 'test002.jpg'}: shutil.copyfile(src, os.path.join(uploads, filename)) @given("following photo data") def step_impl(context): table_to_database(context.table, Photo) @then("list of {num:d} object with image details should be sent") @then("list of {num:d} objects with image details should be sent") def step_impl(context, num): assert len(context.rv.json_data) == num assert all(PHOTO_DETAIL_KEYS == set(x) for x in context.rv.json_data) @when("I upload an image to {url}") def step_impl(context, url): data = {'timestamp': datetime_to_str(datetime.now()), 'photo': (open_resource('deathvalley.jpg', mode='rb'), 'TEST_ONLY_deathvalley.jpg')} context.rv = context.request(url, 'POST', data=data) @when("I upload a panorama via PUT to {url}") def step_impl(context, url): data = {'timestamp': datetime_to_str(datetime.now()), 'photo': (open_resource('deathvalley.jpg', mode='rb'), 'TEST_ONLY_deathvalley.jpg')} context.rv = context.request(url, 'PUT', data=data) @then("JSON with image details should be sent") def step_impl(context): assert PHOTO_DETAIL_KEYS == set(context.rv.json_data) # Save the photo filename so it can be later removed context.test_photo_url = context.rv.json_data['filename'] # For "Then the same JSON data should be sent" step context.last_json_data = context.rv.json_data @when('I request file from "{key}" key') def step_impl(context, key): context.rv = context.app.get(context.rv.json_data[key]) @when("I upload a file with '{ext}' extension to {url}") def step_impl(context, ext, url): data = {'timestamp': datetime_to_str(datetime.now()), 'photo': (BytesIO(b'test'), 'example.' + ext)} context.rv = context.request(url, 'POST', data=data)
the-stack_0_8278
import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" import numpy as np import torch import torch.nn as nn import torch.optim as optim import sys import json import gc from tqdm import tqdm from sklearn.cluster import KMeans from encode import lstm_encoder from dataprocess_tacred import data_sampler from model import proto_softmax_layer from dataprocess_tacred import get_data_loader from transformers import BertTokenizer,BertModel from util import set_seed,process_data,getnegfrombatch,select_similar_data_new_tac import faiss def eval_model(config, basemodel, test_set, mem_relations): print("One eval") print("test data num is:\t",len(test_set)) basemodel.eval() test_dataloader = get_data_loader(config, test_set, shuffle=False, batch_size=30) allnum= 0.0 correctnum = 0.0 for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths, typelabels) in enumerate(test_dataloader): logits, rep = basemodel(sentences, lengths) distances = basemodel.get_mem_feature(rep) short_logits = distances #short_logits = logits for index, logit in enumerate(logits): score = short_logits[index] # logits[index] + short_logits[index] + long_logits[index] allnum += 1.0 golden_score = score[labels[index]] max_neg_score = -2147483647.0 for i in neg_labels[index]: # range(num_class): if (i != labels[index]) and (score[i] > max_neg_score): max_neg_score = score[i] if golden_score > max_neg_score: correctnum += 1 acc = correctnum / allnum print(acc) basemodel.train() return acc def get_memory(config, model, proto_set): memset = [] resset = [] rangeset= [0] for i in proto_set: #print(i) memset += i rangeset.append(rangeset[-1] + len(i)) data_loader = get_data_loader(config, memset, False, False) features = [] for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths, typelabels) in enumerate(data_loader): feature = model.get_feature(sentences, lengths) features.append(feature) features = np.concatenate(features) protos = [] #print ("proto_instaces:%d"%len(features)) for i in range(len(proto_set)): protos.append(torch.tensor(features[rangeset[i]:rangeset[i+1],:].mean(0, keepdims = True))) protos = torch.cat(protos, 0) #print(protos.shape) return protos def select_data(mem_set, proto_memory, config, model, divide_train_set, num_sel_data, current_relations, selecttype): ####select data according to selecttype #selecttype is 0: cluster for every rel #selecttype is 1: use ave embedding rela_num = len(current_relations) for i in range(0, rela_num): thisrel = current_relations[i] if thisrel in mem_set.keys(): #print("have set mem before") mem_set[thisrel] = {'0': [], '1': {'h': [], 't': []}} proto_memory[thisrel].pop() else: mem_set[thisrel] = {'0': [], '1': {'h': [], 't': []}} thisdataset = divide_train_set[thisrel] data_loader = get_data_loader(config, thisdataset, False, False) features = [] for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths, typelabels) in enumerate(data_loader): feature = model.get_feature(sentences, lengths) features.append(feature) features = np.concatenate(features) #print(features.shape) num_clusters = min(num_sel_data, len(thisdataset)) if selecttype == 0: kmeans = KMeans(n_clusters=num_clusters, random_state=0) distances = kmeans.fit_transform(features) for i in range(num_clusters): sel_index = np.argmin(distances[:, i]) instance = thisdataset[sel_index] ###change tylelabel instance[11] = 3 ###add 
to mem data mem_set[thisrel]['0'].append(instance) ####positive sample cluster_center = kmeans.cluster_centers_[i] #print(cluster_center.shape) proto_memory[thisrel].append(instance) elif selecttype == 1: #print("use average embedding") samplenum = features.shape[0] veclength = features.shape[1] sumvec = np.zeros(veclength) for j in range(samplenum): sumvec += features[j] sumvec /= samplenum ###find nearest sample mindist = 100000000 minindex = -100 for j in range(samplenum): dist = np.sqrt(np.sum(np.square(features[j] - sumvec))) if dist < mindist: minindex = j mindist = dist #print(minindex) instance = thisdataset[j] ###change tylelabel instance[11] = 3 mem_set[thisrel]['0'].append(instance) proto_memory[thisrel].append(instance) else: print("error select type") #####to get negative sample mem_set[thisrel]['1'] if rela_num > 1: ####we need to sample negative samples allnegres = {} for i in range(rela_num): thisnegres = {'h':[],'t':[]} currel = current_relations[i] thisrelposnum = len(mem_set[currel]['0']) #assert thisrelposnum == num_sel_data #allnum = list(range(thisrelposnum)) for j in range(thisrelposnum): thisnegres['h'].append(mem_set[currel]['0'][j][3]) thisnegres['t'].append(mem_set[currel]['0'][j][5]) allnegres[currel] = thisnegres ####get neg sample for i in range(rela_num): togetnegindex = (i + 1) % rela_num togetnegrelname = current_relations[togetnegindex] mem_set[current_relations[i]]['1']['h'].extend(allnegres[togetnegrelname]['h']) mem_set[current_relations[i]]['1']['t'].extend(allnegres[togetnegrelname]['t']) return mem_set tempthre = 0.2 factorfor2 = 1.0 factorfor3 = 1.0 factorfor4 = 1.0 factorfor5 = 0.1 def train_model_with_hard_neg(config, model, mem_set, traindata, epochs, current_proto, ifnegtive=0): print(len(traindata)) #print(len(train_set)) mem_data = [] if len(mem_set) != 0: for key in mem_set.keys(): mem_data.extend(mem_set[key]['0']) print(len(mem_data)) train_set = traindata + mem_data #train_set.extend(mem_data) ########??????maybe some question!! 
重复添加mem print(len(train_set)) data_loader = get_data_loader(config, train_set, batch_size=config['batch_size_per_step']) model.train() criterion = nn.CrossEntropyLoss() lossfn = nn.MultiMarginLoss(margin=0.2) optimizer = optim.Adam(model.parameters(), config['learning_rate']) for epoch_i in range(epochs): model.set_memorized_prototypes(current_proto) losses1 = [] losses2 = [] losses3 = [] losses4 = [] losses5 = [] lossesfactor1 = 0.0 lossesfactor2 = factorfor2 lossesfactor3 = factorfor3 lossesfactor4 = factorfor4 lossesfactor5 = factorfor5 for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths, typelabels) in enumerate(data_loader): model.zero_grad() #print(len(sentences)) labels = labels.to(config['device']) typelabels = typelabels.to(config['device']) ####0:rel 1:pos(new train data) 2:neg 3:mem numofmem = 0 numofnewtrain = 0 allnum = 0 memindex = [] for index,onetype in enumerate(typelabels): if onetype == 1: numofnewtrain += 1 if onetype == 3: numofmem += 1 memindex.append(index) allnum += 1 #print(numofmem) #print(numofnewtrain) getnegfromnum = 1 allneg = [] alllen = [] if numofmem > 0: ###select neg data for mem for oneindex in memindex: negres,lenres = getnegfrombatch(oneindex,firstent,firstentindex,secondent,secondentindex,sentences,lengths,getnegfromnum,allnum,labels,neg_labels) for aa in negres: allneg.append(torch.tensor(aa)) for aa in lenres: alllen.append(torch.tensor(aa)) sentences.extend(allneg) lengths.extend(alllen) logits, rep = model(sentences, lengths) #print(logits.shape) #print(rep.shape) logits_proto = model.mem_forward(rep) #print(logits_proto.shape) logitspos = logits[0:allnum,] #print(logitspos.shape) logits_proto_pos = logits_proto[0:allnum,] #print(logits_proto_pos.shape) if numofmem > 0: logits_proto_neg = logits_proto[allnum:,] logits = logitspos logits_proto = logits_proto_pos loss1 = criterion(logits, labels) loss2 = criterion(logits_proto, labels) loss4 = lossfn(logits_proto, labels) loss3 = torch.tensor(0.0).to(config['device']) for index, logit in enumerate(logits): score = logits_proto[index] preindex = labels[index] maxscore = score[preindex] size = score.shape[0] secondmax = -100000 for j in range(size): if j != preindex and score[j] > secondmax: secondmax = score[j] if secondmax - maxscore + tempthre > 0.0: loss3 += (secondmax - maxscore + tempthre).to(config['device']) loss3 /= logits.shape[0] start = 0 loss5 = torch.tensor(0.0).to(config['device']) allusenum = 0 for index in memindex: onepos = logits_proto[index] posindex = labels[index] #poslabelscore = torch.exp(onepos[posindex]) poslabelscore = onepos[posindex] negnum = getnegfromnum * 2 negscore = torch.tensor(0.0).to(config['device']) for ii in range(start, start + negnum): oneneg = logits_proto_neg[ii] #negscore += torch.exp(oneneg[posindex]) negscore = oneneg[posindex] if negscore - poslabelscore + 0.01 > 0.0 and negscore < poslabelscore: loss5 += (negscore - poslabelscore + 0.01) allusenum += 1 #loss5 += (-torch.log(poslabelscore/(poslabelscore+negscore))) start += negnum #print(len(memindex)) if len(memindex) == 0: loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4 else: #loss5 /= len(memindex) loss5 = loss5 / allusenum #loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4 ###no loss5 loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4 + loss5 * lossesfactor5 ###with loss5 
loss.backward() losses1.append(loss1.item()) losses2.append(loss2.item()) losses3.append(loss3.item()) losses4.append(loss4.item()) losses5.append(loss5.item()) #print("step:\t", step, "\tloss1:\t", loss1.item(), "\tloss2:\t", loss2.item(), "\tloss3:\t", loss3.item(), # "\tloss4:\t", loss4.item(), "\tloss5:\t", loss5.item()) torch.nn.utils.clip_grad_norm_(model.parameters(), config['max_grad_norm']) optimizer.step() return model def train_simple_model(config, model, mem_set, train_set, epochs, current_proto, ifusemem=False): if ifusemem: mem_data = [] if len(mem_set)!=0: for key in mem_set.keys(): mem_data.extend(mem_set[key]['0']) train_set.extend(mem_data) data_loader = get_data_loader(config, train_set, batch_size=config['batch_size_per_step']) model.train() criterion = nn.CrossEntropyLoss() lossfn = nn.MultiMarginLoss(margin=0.2) optimizer = optim.Adam(model.parameters(), config['learning_rate']) for epoch_i in range(epochs): model.set_memorized_prototypes(current_proto) losses1 = [] losses2 = [] losses3 = [] losses4 = [] lossesfactor1 = 0.0 lossesfactor2 = factorfor2 lossesfactor3 = factorfor3 lossesfactor4 = factorfor4 for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths, typelabels) in enumerate(tqdm(data_loader)): model.zero_grad() logits, rep = model(sentences, lengths) logits_proto = model.mem_forward(rep) labels = labels.to(config['device']) loss1 = criterion(logits, labels) loss2 = criterion(logits_proto, labels) loss4 = lossfn(logits_proto, labels) loss3 = torch.tensor(0.0).to(config['device']) ###add triple loss for index, logit in enumerate(logits): score = logits_proto[index] preindex = labels[index] maxscore = score[preindex] size = score.shape[0] secondmax = -100000 for j in range(size): if j != preindex and score[j] > secondmax: secondmax = score[j] if secondmax - maxscore + tempthre > 0.0: loss3 += (secondmax - maxscore + tempthre).to(config['device']) loss3 /= logits.shape[0] loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4 loss.backward() losses1.append(loss1.item()) losses2.append(loss2.item()) losses3.append(loss3.item()) losses4.append(loss4.item()) torch.nn.utils.clip_grad_norm_(model.parameters(), config['max_grad_norm']) optimizer.step() #print (np.array(losses).mean()) return model if __name__ == '__main__': select_thredsold_param = 0.65 select_num = 1 f = open("config/config_tacred.json", "r") config = json.loads(f.read()) f.close() config['device'] = torch.device('cuda' if torch.cuda.is_available() and config['use_gpu'] else 'cpu') config['n_gpu'] = torch.cuda.device_count() config['batch_size_per_step'] = int(config['batch_size'] / config["gradient_accumulation_steps"]) config['neg_sampling'] = False root_path = '.' 
word2id = json.load(open(os.path.join(root_path, 'glove/word2id.txt'))) word2vec = np.load(os.path.join(root_path, 'glove/word2vec.npy')) tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") donum = 1 distantpath = "data/distantdata/" file1 = distantpath + "distant.json" file2 = distantpath + "exclude_fewrel_distant.json" list_data,entpair2scope = process_data(file1,file2) topk = 16 max_sen_length_for_select = 64 max_sen_lstm_tokenize = 128 select_thredsold = select_thredsold_param print("********* load from ckpt ***********") ckptpath = "simmodelckpt" print(ckptpath) ckpt = torch.load(ckptpath) SimModel = BertModel.from_pretrained('bert-base-uncased',state_dict=ckpt["bert-base"]).to(config["device"]) allunlabledata = np.load("allunlabeldata.npy").astype('float32') d = 768 * 2 index = faiss.IndexFlatIP(d) print(index.is_trained) index.add(allunlabledata) # add vectors to the index print(index.ntotal) for m in range(donum): print(m) config["rel_cluster_label"] = "data/tacred/CFRLdata_10_100_10_10/rel_cluster_label_" + str(m) + ".npy" config['training_file'] = "data/tacred/CFRLdata_10_100_10_10/train_" + str(m) + ".txt" config['valid_file'] = "data/tacred/CFRLdata_10_100_10_10/valid_" + str(m) + ".txt" config['test_file'] = "data/tacred/CFRLdata_10_100_10_10/test_" + str(m) + ".txt" encoderforbase = lstm_encoder(token2id=word2id, word2vec=word2vec, word_size=len(word2vec[0]), max_length=128, pos_size=None, hidden_size=config['hidden_size'], dropout=0, bidirectional=True, num_layers=1, config=config) sampler = data_sampler(config, encoderforbase.tokenizer) modelforbase = proto_softmax_layer(encoderforbase, num_class=len(sampler.id2rel), id2rel=sampler.id2rel, drop=0, config=config) modelforbase = modelforbase.to(config["device"]) word2vec_back = word2vec.copy() sequence_results = [] result_whole_test = [] for i in range(6): num_class = len(sampler.id2rel) print(config['random_seed'] + 10 * i) set_seed(config, config['random_seed'] + 10 * i) sampler.set_seed(config['random_seed'] + 10 * i) mem_set = {} #### mem_set = {rel_id:{'0':[positive samples],'1':[negative samples]}} 换5个head 换5个tail mem_relations = [] ###not include relation of current task past_relations = [] savetest_all_data = None saveseen_relations = [] proto_memory = [] for i in range(len(sampler.id2rel)): proto_memory.append([sampler.id2rel_pattern[i]]) oneseqres = [] ################################## whichdataselecct = 1 ifnorm = True ################################## for steps, (training_data, valid_data, test_data, test_all_data, seen_relations, current_relations) in enumerate(sampler): #print(steps) print("------------------------") print(len(training_data)) #for aa in range(20): # print(training_data[aa]) savetest_all_data = test_all_data saveseen_relations = seen_relations currentnumber = len(current_relations) print(currentnumber) print(current_relations) divide_train_set = {} for relation in current_relations: divide_train_set[relation] = [] ##int for data in training_data: divide_train_set[data[0]].append(data) print(len(divide_train_set)) ####select most similar sentence for new task, not for base task ####step==0是base model if steps == 0: ##train base model print("train base model,not select most similar") else: print("train new model,select most similar") selectdata = select_similar_data_new_tac(training_data, tokenizer, entpair2scope, topk, max_sen_length_for_select,list_data, config, SimModel, select_thredsold,max_sen_lstm_tokenize,encoderforbase.tokenizer,index,ifnorm,select_num) print(len(selectdata)) 
training_data.extend(selectdata) print(len(training_data)) #''' current_proto = get_memory(config, modelforbase, proto_memory) modelforbase = train_simple_model(config, modelforbase, mem_set, training_data, 1, current_proto, False) select_data(mem_set, proto_memory, config, modelforbase, divide_train_set, config['rel_memory_size'], current_relations, 0) ##config['rel_memory_size'] == 1 for j in range(2): current_proto = get_memory(config, modelforbase, proto_memory) modelforbase = train_model_with_hard_neg(config, modelforbase, mem_set, training_data, 1, current_proto, ifnegtive=0) current_proto = get_memory(config, modelforbase, proto_memory) modelforbase.set_memorized_prototypes(current_proto) mem_relations.extend(current_relations) currentalltest = [] for mm in range(len(test_data)): currentalltest.extend(test_data[mm]) #eval_model(config, modelforbase, test_data[mm], mem_relations) thisstepres = eval_model(config, modelforbase, currentalltest, mem_relations) print("step:\t",steps,"\taccuracy:\t",thisstepres) oneseqres.append(thisstepres) sequence_results.append(np.array(oneseqres)) #def eval_both_model(config, newmodel, basemodel, test_set, mem_relations, baserelation, newrelation, proto_embed): allres = eval_model(config, modelforbase, savetest_all_data, saveseen_relations) result_whole_test.append(allres) print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&") print("after one epoch allres:\t",allres) print(result_whole_test) # initialize the models modelforbase = modelforbase.to('cpu') del modelforbase gc.collect() if config['device'] == 'cuda': torch.cuda.empty_cache() encoderforbase = lstm_encoder(token2id=word2id, word2vec=word2vec_back.copy(), word_size=len(word2vec[0]),max_length=128, pos_size=None, hidden_size=config['hidden_size'], dropout=0, bidirectional=True, num_layers=1, config=config) modelforbase = proto_softmax_layer(encoderforbase, num_class=len(sampler.id2rel), id2rel=sampler.id2rel, drop=0, config=config) modelforbase.to(config["device"]) # output the final avg result print("Final result!") print(result_whole_test) for one in sequence_results: for item in one: sys.stdout.write('%.4f, ' % item) print('') avg_result_all_test = np.average(sequence_results, 0) for one in avg_result_all_test: sys.stdout.write('%.4f, ' % one) print('') print("Finish training............................") #'''
the-stack_0_8280
import asyncio import logging import pathlib import random import tempfile from concurrent.futures.process import ProcessPoolExecutor from typing import IO, List, Tuple, Optional from chia.consensus.block_record import BlockRecord from chia.consensus.constants import ConsensusConstants from chia.full_node.weight_proof import ( _validate_sub_epoch_summaries, vars_to_bytes, validate_sub_epoch_sampling, _validate_sub_epoch_segments, _validate_recent_blocks_and_get_records, chunks, _validate_vdf_batch, ) from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary from chia.types.weight_proof import ( WeightProof, ) from chia.util.ints import uint32 log = logging.getLogger(__name__) def _create_shutdown_file() -> IO: return tempfile.NamedTemporaryFile(prefix="chia_wallet_weight_proof_handler_executor_shutdown_trigger") class WalletWeightProofHandler: LAMBDA_L = 100 C = 0.5 MAX_SAMPLES = 20 def __init__( self, constants: ConsensusConstants, ): self._constants = constants self._num_processes = 4 self._executor_shutdown_tempfile: IO = _create_shutdown_file() self._executor: ProcessPoolExecutor = ProcessPoolExecutor(self._num_processes) self._weight_proof_tasks: List[asyncio.Task] = [] def cancel_weight_proof_tasks(self): for task in self._weight_proof_tasks: if not task.done(): task.cancel() self._weight_proof_tasks = [] self._executor_shutdown_tempfile.close() self._executor.shutdown(wait=True) async def validate_weight_proof( self, weight_proof: WeightProof, skip_segment_validation=False ) -> Tuple[bool, uint32, List[SubEpochSummary], List[BlockRecord]]: task: asyncio.Task = asyncio.create_task( self._validate_weight_proof_inner(weight_proof, skip_segment_validation) ) self._weight_proof_tasks.append(task) valid, fork_point, summaries, block_records = await task self._weight_proof_tasks.remove(task) return valid, fork_point, summaries, block_records async def _validate_weight_proof_inner( self, weight_proof: WeightProof, skip_segment_validation: bool ) -> Tuple[bool, uint32, List[SubEpochSummary], List[BlockRecord]]: assert len(weight_proof.sub_epochs) > 0 if len(weight_proof.sub_epochs) == 0: return False, uint32(0), [], [] peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height log.info(f"validate weight proof peak height {peak_height}") summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self._constants, weight_proof) if summaries is None: log.error("weight proof failed sub epoch data validation") return False, uint32(0), [], [] seed = summaries[-2].get_hash() rng = random.Random(seed) if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof): log.error("failed weight proof sub epoch sample validation") return False, uint32(0), [], [] constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes( self._constants, summaries, weight_proof ) vdf_tasks: List[asyncio.Future] = [] recent_blocks_validation_task: asyncio.Future = asyncio.get_running_loop().run_in_executor( self._executor, _validate_recent_blocks_and_get_records, constants, wp_recent_chain_bytes, summary_bytes, pathlib.Path(self._executor_shutdown_tempfile.name), ) try: if not skip_segment_validation: segments_validated, vdfs_to_validate = _validate_sub_epoch_segments( constants, rng, wp_segment_bytes, summary_bytes ) if not segments_validated: return False, uint32(0), [], [] vdf_chunks = chunks(vdfs_to_validate, self._num_processes) for chunk in vdf_chunks: byte_chunks = [] for vdf_proof, classgroup, vdf_info in chunk: 
byte_chunks.append((bytes(vdf_proof), bytes(classgroup), bytes(vdf_info))) vdf_task: asyncio.Future = asyncio.get_running_loop().run_in_executor( self._executor, _validate_vdf_batch, constants, byte_chunks, pathlib.Path(self._executor_shutdown_tempfile.name), ) vdf_tasks.append(vdf_task) for vdf_task in vdf_tasks: validated = await vdf_task if not validated: return False, uint32(0), [], [] valid_recent_blocks, records_bytes = await recent_blocks_validation_task finally: recent_blocks_validation_task.cancel() for vdf_task in vdf_tasks: vdf_task.cancel() if not valid_recent_blocks: log.error("failed validating weight proof recent blocks") # Verify the data return False, uint32(0), [], [] records = [BlockRecord.from_bytes(b) for b in records_bytes] # TODO fix find fork point return True, uint32(0), summaries, records def get_fork_point(self, old_wp: Optional[WeightProof], new_wp: WeightProof) -> uint32: """ iterate through sub epoch summaries to find fork point. This method is conservative, it does not return the actual fork point, it can return a height that is before the actual fork point. """ if old_wp is None: return uint32(0) old_ses = set() for ses in old_wp.sub_epochs: old_ses.add(ses.reward_chain_hash) overflow = 0 count = 0 for idx, new_ses in enumerate(new_wp.sub_epochs): if new_ses.reward_chain_hash in old_ses: count += 1 overflow += new_ses.num_blocks_overflow continue else: break # Try to find an exact fork point if new_wp.recent_chain_data[0].height >= old_wp.recent_chain_data[0].height: left_wp = old_wp right_wp = new_wp else: left_wp = new_wp right_wp = old_wp r_index = 0 l_index = 0 while r_index < len(right_wp.recent_chain_data) and l_index < len(left_wp.recent_chain_data): if right_wp.recent_chain_data[r_index].header_hash == left_wp.recent_chain_data[l_index].header_hash: r_index += 1 continue # Keep incrementing left pointer until we find a match l_index += 1 if r_index != 0: # We found a matching block, this is the last matching block return right_wp.recent_chain_data[r_index - 1].height # Just return the matching sub epoch height return uint32((self._constants.SUB_EPOCH_BLOCKS * count) - overflow)
the-stack_0_8282
import urllib.request import unittest import time import dewpoint.aws class TestAWSAuthHandlerV4(unittest.TestCase): def setUp(self): self.auth_handler = dewpoint.aws.AWSAuthHandlerV4( key='AKIDEXAMPLE', secret='wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY', region='us-east-1', service='iam') def test_canonical_request(self): req = urllib.request.Request('https://iam.amazonaws.com/?Action=ListUsers&Version=2010-05-08', headers={ 'Content-type': 'application/x-www-form-urlencoded; charset=utf-8', 'Host': 'iam.amazonaws.com', 'x-amz-date': '20150830T123600Z', }) chash = dewpoint.aws.canonical_hash(req) self.assertEqual(chash, 'f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59') def test_signing_key(self): scope = '{date}/{region}/{service}/aws4_request'.format( date='20150830', region='us-east-1', service='iam') skey = self.auth_handler.signing_key(scope) self.assertEqual(skey, bytes.fromhex('c4afb1cc5771d871763a393e44b703571b55cc28424d1a5e86da6ed3c154a4b9')) def test_signature(self): req = urllib.request.Request('https://iam.amazonaws.com/?Action=ListUsers&Version=2010-05-08', headers={ 'Content-type': 'application/x-www-form-urlencoded; charset=utf-8', 'Host': 'iam.amazonaws.com', 'x-amz-date': '20150830T123600Z', }) req.timestamp = time.localtime(1440963360.0) self.auth_handler.sign(req) self.assertEqual(req.headers['Authorization'], 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7')
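

# Allow the suite to be run by executing this module directly, in addition to
# a test runner such as pytest or `python -m unittest` (mirrors the pattern
# used by the other unittest-based files in this corpus).
if __name__ == '__main__':
    unittest.main()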
the-stack_0_8283
from flask import current_app, Blueprint, request from assemblyline_ui.api.base import api_login, make_api_response from assemblyline_ui.config import config API_PREFIX = "/api/v4" apiv4 = Blueprint("apiv4", __name__, url_prefix=API_PREFIX) apiv4._doc = "Version 4 Api Documentation" ##################################### # API DOCUMENTATION # noinspection PyProtectedMember,PyBroadException @apiv4.route("/") @api_login(audit=False, required_priv=['R', 'W'], require_type=["user", "signature_importer", "signature_manager", "admin"]) def get_api_documentation(**kwargs): """ Full API doc. Loop through all registered API paths and display their documentation. Returns a list of API definition. Variables: None Arguments: None Data Block: None Result example: [ # LIST of: {'name': "Api Doc", # Name of the api 'path': "/api/path/<variable>/", # API path 'ui_only': false, # Is UI only API 'methods': ["GET", "POST"], # Allowed HTTP methods 'description': "API doc.", # API documentation 'id': "api_doc", # Unique ID for the API 'function': "apiv4.api_doc", # Function called in the code 'protected': False, # Does the API require login? 'require_type': ['user'], # Type of users allowed to use API 'complete' : True}, # Is the API stable? ...] """ user_types = kwargs['user']['type'] api_blueprints = {} api_list = [] for rule in current_app.url_map.iter_rules(): if rule.rule.startswith(request.path): methods = [] for item in rule.methods: if item != "OPTIONS" and item != "HEAD": methods.append(item) func = current_app.view_functions[rule.endpoint] require_type = func.__dict__.get('require_type', ['user']) allow_readonly = func.__dict__.get('allow_readonly', True) if config.ui.read_only and not allow_readonly: continue for u_type in user_types: if u_type in require_type: doc_string = func.__doc__ func_title = " ".join([x.capitalize() for x in rule.endpoint[rule.endpoint.rindex(".") + 1:].split("_")]) blueprint = rule.endpoint[:rule.endpoint.rindex(".")] if blueprint == "apiv4": blueprint = "documentation" if blueprint not in api_blueprints: try: doc = current_app.blueprints[rule.endpoint[:rule.endpoint.rindex(".")]]._doc except Exception: doc = "" api_blueprints[blueprint] = doc try: description = "\n".join([x[4:] for x in doc_string.splitlines()]) except Exception: description = "[INCOMPLETE]\n\nTHIS API HAS NOT BEEN DOCUMENTED YET!" api_id = rule.endpoint.replace("apiv4.", "").replace(".", "_") api_list.append({ "protected": func.__dict__.get('protected', False), "require_type": require_type, "name": func_title, "id": api_id, "function": f"api.v4.{rule.endpoint}", "path": rule.rule, "ui_only": rule.rule.startswith("%sui/" % request.path), "methods": methods, "description": description, "complete": "[INCOMPLETE]" not in description, "required_priv": func.__dict__.get('required_priv', "") }) break return make_api_response({"apis": api_list, "blueprints": api_blueprints})
the-stack_0_8284
# Reads a 9x9 Sudoku board as 9 lines of 9 space-separated integers (0 = empty cell)
# and prints one completed board found by backtracking.
mappp = [list(map(int, input().split())) for _ in range(9)]

# Collect the coordinates of every empty cell.
pos = []
for i in range(9):
    for j in range(9):
        if mappp[i][j] == 0:
            pos.append([i, j])
enddd = False  # becomes True once a full solution has been printed

def back_dfs(idx):
    global enddd
    if enddd:
        return
    if idx == len(pos):
        # Every empty cell is filled: print the board and stop further search.
        for i in range(9):
            for j in range(9):
                print(mappp[i][j], end=" ")
            print()
        enddd = True
        return
    else:
        x = pos[idx][0]
        y = pos[idx][1]
        # Candidate digits = 1..9 minus everything already used in this row,
        # this column, and the surrounding 3x3 box.
        arr = [i for i in range(1, 10)]
        for a in range(9):
            if mappp[x][a] in arr:
                arr.remove(mappp[x][a])
            if mappp[a][y] in arr:
                arr.remove(mappp[a][y])
        start_i = (x // 3) * 3
        start_j = (y // 3) * 3
        for a in range(start_i, start_i + 3):
            for b in range(start_j, start_j + 3):
                if mappp[a][b] in arr:
                    arr.remove(mappp[a][b])
        # Try each remaining candidate and undo the choice (backtrack) afterwards.
        for a in arr:
            mappp[x][y] = a
            back_dfs(idx + 1)
            mappp[x][y] = 0

back_dfs(0)
the-stack_0_8285
# -*- coding: utf-8 -*- # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) """Kite document requests handlers and senders.""" from collections import defaultdict import logging import hashlib import os import os.path as osp from qtpy.QtCore import QMutexLocker from spyder.plugins.completion.kite.decorators import send_request, handles from spyder.plugins.completion.manager.api import ( LSPRequestTypes, CompletionItemKind) # Kite can return e.g. "int | str", so we make the default hint VALUE. KITE_DOCUMENT_TYPES = defaultdict(lambda: CompletionItemKind.VALUE, { 'function': CompletionItemKind.FUNCTION, 'type': CompletionItemKind.CLASS, 'module': CompletionItemKind.MODULE, 'descriptor': CompletionItemKind.PROPERTY, 'union': CompletionItemKind.VALUE, 'unknown': CompletionItemKind.TEXT, 'keyword': CompletionItemKind.KEYWORD, 'call': CompletionItemKind.FUNCTION, }) KITE_COMPLETION = 'Kite' logger = logging.getLogger(__name__) def convert_text_snippet(snippet_info): text = snippet_info['text'] text_builder = [] prev_pos = 0 next_pos = None num_placeholders = len(snippet_info['placeholders']) total_placeholders = num_placeholders + 1 for i, placeholder in enumerate(snippet_info['placeholders']): placeholder_begin = placeholder['begin'] placeholder_end = placeholder['end'] next_pos = placeholder_begin standard_text = text[prev_pos:next_pos] snippet_text = text[next_pos:placeholder_end] prev_pos = placeholder['end'] text_builder.append(standard_text) placeholder_number = (i + 1) % total_placeholders if snippet_text: snippet = '${%d:%s}' % (placeholder_number, snippet_text) else: snippet = '$%d' % (placeholder_number) text_builder.append(snippet) text_builder.append(text[prev_pos:]) if num_placeholders > 0: text_builder.append('$0') return ''.join(text_builder) class DocumentProvider: @send_request(method=LSPRequestTypes.DOCUMENT_DID_OPEN) def document_did_open(self, params): request = { 'source': 'spyder', 'filename': osp.realpath(params['file']), 'text': params['text'], 'action': 'focus', 'selections': [{ 'start': params['selection_start'], 'end': params['selection_end'], 'encoding': 'utf-16', }], } with QMutexLocker(self.mutex): self.get_status(params['file']) self.opened_files[params['file']] = params['text'] return request @send_request(method=LSPRequestTypes.DOCUMENT_DID_CHANGE) def document_did_change(self, params): request = { 'source': 'spyder', 'filename': osp.realpath(params['file']), 'text': params['text'], 'action': 'edit', 'selections': [{ 'start': params['selection_start'], 'end': params['selection_end'], 'encoding': 'utf-16', }], } with QMutexLocker(self.mutex): self.opened_files[params['file']] = params['text'] return request @send_request(method=LSPRequestTypes.DOCUMENT_CURSOR_EVENT) def document_cursor_event(self, params): request = { 'source': 'spyder', 'filename': osp.realpath(params['file']), 'text': params['text'], 'action': 'edit', 'selections': [{ 'start': params['selection_start'], 'end': params['selection_end'], 'encoding': 'utf-16', }], } return request @send_request(method=LSPRequestTypes.DOCUMENT_COMPLETION) def request_document_completions(self, params): text = self.opened_files[params['file']] request = { 'filename': osp.realpath(params['file']), 'editor': 'spyder', 'no_snippets': not self.enable_code_snippets, 'text': text, 'position': { 'begin': params['selection_start'], 'end': params['selection_end'], }, 'offset_encoding': 'utf-16', } return request 
@handles(LSPRequestTypes.DOCUMENT_COMPLETION) def convert_completion_request(self, response): # The response schema is tested via mocking in # spyder/plugins/editor/widgets/tests/test_introspection.py logger.debug(response) if response is None: return {'params': []} spyder_completions = [] completions = response['completions'] if completions is not None: for i, completion in enumerate(completions): entry = { 'kind': KITE_DOCUMENT_TYPES.get( completion['hint'], CompletionItemKind.TEXT), 'label': completion['display'], 'textEdit': { 'newText': convert_text_snippet(completion['snippet']), 'range': { 'start': completion['replace']['begin'], 'end': completion['replace']['end'], }, }, 'filterText': '', # Use the returned ordering 'sortText': (i, 0), 'documentation': completion['documentation']['text'], 'provider': KITE_COMPLETION, } spyder_completions.append(entry) if 'children' in completion: for j, child in enumerate(completion['children']): child_entry = { 'kind': KITE_DOCUMENT_TYPES.get( child['hint'], CompletionItemKind.TEXT), 'label': ' '*2 + child['display'], 'textEdit': { 'newText': convert_text_snippet( child['snippet']), 'range': { 'start': child['replace']['begin'], 'end': child['replace']['end'], }, }, 'insertText': convert_text_snippet( child['snippet']), 'filterText': '', # Use the returned ordering 'sortText': (i, j+1), 'documentation': child['documentation']['text'], 'provider': KITE_COMPLETION, } spyder_completions.append(child_entry) return {'params': spyder_completions} @send_request(method=LSPRequestTypes.DOCUMENT_HOVER) def request_hover(self, params): text = self.opened_files.get(params['file'], "") md5 = hashlib.md5(text.encode('utf-8')).hexdigest() path = params['file'] path = path.replace(osp.sep, ':') logger.debug(path) if os.name == 'nt': path = path.replace('::', ':') path = ':windows:' + path request = { 'filename': path, 'hash': md5, 'cursor_runes': params['offset'], 'offset_encoding': 'utf-16', } return None, request @handles(LSPRequestTypes.DOCUMENT_HOVER) def process_hover(self, response): # logger.debug(response) text = None logger.debug(response) if response is not None: report = response['report'] text = report['description_text'] if len(text) == 0: text = None else: text = None return {'params': text} @send_request(method=LSPRequestTypes.DOCUMENT_SIGNATURE) def request_signature(self, request): text = self.opened_files.get(request['file'], "") response = { 'editor': 'spyder', 'filename': request['file'], 'text': text, 'cursor_runes': request['offset'], 'offset_encoding': 'utf-16', } return response @handles(LSPRequestTypes.DOCUMENT_SIGNATURE) def process_signature(self, response): params = None if response is not None: calls = response['calls'] if len(calls) > 0: call = calls[0] callee = call['callee'] documentation = callee['synopsis'] call_label = callee['repr'] signatures = call['signatures'] arg_idx = call['arg_index'] parameters = [] names = [] logger.debug(signatures) if len(signatures) > 0: signature = signatures[0] logger.debug(signature) if signature['args'] is not None: for arg in signature['args']: parameters.append({ 'label': arg['name'], 'documentation': '' }) names.append(arg['name']) func_args = ', '.join(names) call_label = '{0}({1})'.format(call_label, func_args) base_signature = { 'label': call_label, 'documentation': documentation, 'parameters': parameters } # doc_signatures.append(base_signature) params = { 'signatures': base_signature, 'activeSignature': 0, 'activeParameter': arg_idx, 'provider': KITE_COMPLETION } return {'params': params}
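

# Illustrative sketch (only runs when this module is executed directly):
# convert_text_snippet rewrites Kite's placeholder offsets as an LSP-style
# snippet string. The demo dict below only mimics the minimal fields the
# function actually reads ('text' and 'placeholders' with 'begin'/'end').
if __name__ == '__main__':
    demo = {'text': 'foo(bar)', 'placeholders': [{'begin': 4, 'end': 7}]}
    print(convert_text_snippet(demo))  # prints: foo(${1:bar})$0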
the-stack_0_8286
#!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Copyright (c) 2017 The Raven Core developers # Copyright (c) 2018 The Rito Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Testing asset mempool use cases """ from test_framework.test_framework import RitoTestFramework from test_framework.util import * import string class AssetMempoolTest(RitoTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 def activate_assets(self): self.log.info("Generating RITO and activating assets...") n0, n1 = self.nodes[0], self.nodes[1] n0.generate(1) self.sync_all() n0.generate(216) self.sync_all() n1.generate(216) self.sync_all() assert_equal("active", n0.getblockchaininfo()['bip9_softforks']['assets']['status']) def issue_mempool_test(self): self.log.info("Testing issue mempool...") n0, n1 = self.nodes[0], self.nodes[1] disconnect_all_nodes(self.nodes) asset_name = "MEMPOOL" # Issue asset on chain 1 and mine it into the blocks n0.issue(asset_name) n0.generate(15) # Issue asset on chain 2 but keep it in the mempool. No mining txid = n1.issue(asset_name) print(txid) connect_all_nodes_bi(self.nodes) assert_equal(n0.getblockcount(), n1.getblockcount()) assert_equal(n0.getbestblockhash(), n1.getbestblockhash()) def run_test(self): self.activate_assets() self.issue_mempool_test() if __name__ == '__main__': AssetMempoolTest().main()
the-stack_0_8287
""" Copyright 2019 Goldman Sachs. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging from collections import namedtuple from enum import Enum, IntEnum from functools import wraps from typing import Optional, Union, List import pandas as pd from gs_quant.api.gs.data import QueryType def _create_enum(name, members): return Enum(name, {n.upper(): n.lower() for n in members}, module=__name__) def _create_int_enum(name, mappings): return IntEnum(name, {k.upper(): v for k, v in mappings.items()}) Interpolate = _create_enum('Interpolate', ['intersect', 'step', 'nan', 'zero', 'time']) Returns = _create_enum('Returns', ['simple', 'logarithmic']) SeriesType = _create_enum('SeriesType', ['prices', 'returns']) Window = namedtuple('Window', ['w', 'r']) def _check_window(x: pd.Series, window: Window): if len(x) > 0: if window.w <= 0: raise ValueError('Window value must be greater than zero.') if window.r > len(x) or window.r < 0: raise ValueError('Ramp value must be less than the length of the series and greater than zero.') def apply_ramp(x: pd.Series, window: Window) -> pd.Series: _check_window(x, window) return x[window.r:] if window.w <= len(x) else pd.Series([]) def normalize_window(x: pd.Series, window: Union[Window, int, None], default_window: int = None) -> Window: if default_window is None: default_window = x.size if isinstance(window, int): window = Window(w=window, r=window) else: if window is None: window = Window(w=default_window, r=0) else: if window.w and window.r is None: window_size = window.w window = Window(w=window_size, r=window_size) elif window.w is None and window.r >= 0: window = Window(w=default_window, r=window.r) _check_window(x, window) return window def plot_function(fn): # Indicates that fn should be exported to plottool as a pure function. fn.plot_function = True return fn def plot_measure(asset_class: Optional[tuple] = None, asset_type: Optional[tuple] = None, dependencies: Optional[List[QueryType]] = []): # Indicates that fn should be exported to plottool as a member function / pseudo-measure. # Set category to None for no restrictions, else provide a tuple of allowed values. def decorator(fn): assert asset_class is None or isinstance(asset_class, tuple) assert asset_type is None or isinstance(asset_type, tuple) fn.plot_measure = True fn.asset_class = asset_class fn.asset_type = asset_type fn.dependencies = dependencies return fn return decorator def log_return(logger: logging.Logger, message): def outer(fn): @wraps(fn) def inner(*args, **kwargs): response = fn(*args, **kwargs) logger.debug('%s: %s', message, response) return response return inner return outer
the-stack_0_8290
import pytest from lkmltools.google_auth_helper import GoogleAuthHelper import os import json @pytest.fixture(scope="module") def get_raw_json(): raw_json = { "type": "service_account", "project_id": "someproject", "private_key_id": "xxx", "private_key": "-----BEGIN PRIVATE KEY-----\nxxx-----END PRIVATE KEY-----\n", "client_email": "[email protected]", "client_id": "1234567890", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://oauth2.googleapis.com/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/someuser%40appspot.gserviceaccount.com", } return raw_json @pytest.fixture(scope="module") def get_encoded_json(): # this is the encoded version of the raw_json above, so doesn't contain any proper secrets. # The unit tests below confirm that decoding this byte string below matches the JSON above return b"eyd0eXBlJzogJ3NlcnZpY2VfYWNjb3VudCcsICdwcm9qZWN0X2lkJzogJ3NvbWVwcm9qZWN0JywgJ3ByaXZhdGVfa2V5X2lkJzogJ3h4eCcsICdwcml2YXRlX2tleSc6ICctLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbnh4eC0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbicsICdjbGllbnRfZW1haWwnOiAnc29tZXVzZXJAYXBwc3BvdC5nc2VydmljZWFjY291bnQuY29tJywgJ2NsaWVudF9pZCc6ICcxMjM0NTY3ODkwJywgJ2F1dGhfdXJpJzogJ2h0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoJywgJ3Rva2VuX3VyaSc6ICdodHRwczovL29hdXRoMi5nb29nbGVhcGlzLmNvbS90b2tlbicsICdhdXRoX3Byb3ZpZGVyX3g1MDlfY2VydF91cmwnOiAnaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vb2F1dGgyL3YxL2NlcnRzJywgJ2NsaWVudF94NTA5X2NlcnRfdXJsJzogJ2h0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3JvYm90L3YxL21ldGFkYXRhL3g1MDkvc29tZXVzZXIlNDBhcHBzcG90LmdzZXJ2aWNlYWNjb3VudC5jb20nfQ==" def test_encode_service_account(): helper = GoogleAuthHelper() encoded_json = helper.encode_service_account(get_raw_json()) assert encoded_json == get_encoded_json() def test_decode_service_account(): helper = GoogleAuthHelper() decoded_json = helper.decode_service_account(get_encoded_json()) assert decoded_json == get_raw_json() def test_write_decoded_sa_json_to_file(): helper = GoogleAuthHelper() filename = "tmp_test_decoded.json" if os.path.exists(filename): os.remove(filename) helper.write_decoded_sa_json_to_file(get_encoded_json(), filename=filename) assert os.path.exists(filename) with open(filename, "r") as f: data = json.load(f) assert data == get_raw_json() if os.path.exists(filename): os.remove(filename)
the-stack_0_8293
""" Cisco_IOS_XR_lpts_pa_oper This module contains a collection of YANG definitions for Cisco IOS\-XR lpts\-pa package operational data. This module contains definitions for the following management objects\: lpts\-pa\: lpts pre\-ifib data Copyright (c) 2013\-2018 by Cisco Systems, Inc. All rights reserved. """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class LptsPa(Entity): """ lpts pre\-ifib data .. attribute:: entry_xr lpts pa bindings **type**\: :py:class:`EntryXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pa_oper.LptsPa.EntryXr>` .. attribute:: entries lpts pa clients **type**\: :py:class:`Entries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pa_oper.LptsPa.Entries>` """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa, self).__init__() self._top_entity = None self.yang_name = "lpts-pa" self.yang_parent_name = "Cisco-IOS-XR-lpts-pa-oper" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("entry-xr", ("entry_xr", LptsPa.EntryXr)), ("entries", ("entries", LptsPa.Entries))]) self._leafs = OrderedDict() self.entry_xr = LptsPa.EntryXr() self.entry_xr.parent = self self._children_name_map["entry_xr"] = "entry-xr" self.entries = LptsPa.Entries() self.entries.parent = self self._children_name_map["entries"] = "entries" self._segment_path = lambda: "Cisco-IOS-XR-lpts-pa-oper:lpts-pa" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa, [], name, value) class EntryXr(Entity): """ lpts pa bindings .. attribute:: entry Data for single PA Binding **type**\: list of :py:class:`Entry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pa_oper.LptsPa.EntryXr.Entry>` """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa.EntryXr, self).__init__() self.yang_name = "entry-xr" self.yang_parent_name = "lpts-pa" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("entry", ("entry", LptsPa.EntryXr.Entry))]) self._leafs = OrderedDict() self.entry = YList(self) self._segment_path = lambda: "entry-xr" self._absolute_path = lambda: "Cisco-IOS-XR-lpts-pa-oper:lpts-pa/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa.EntryXr, [], name, value) class Entry(Entity): """ Data for single PA Binding .. attribute:: entry (key) Single Binding entry **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ .. attribute:: ctime Creation Time **type**\: :py:class:`Ctime <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pa_oper.LptsPa.EntryXr.Entry.Ctime>` .. attribute:: utime Update Time **type**\: :py:class:`Utime <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pa_oper.LptsPa.EntryXr.Entry.Utime>` .. attribute:: location Rack/slot/instance **type**\: int **range:** 0..4294967295 .. attribute:: client_id Client ID **type**\: int **range:** 0..4294967295 .. attribute:: vid VR/VRF ID **type**\: int **range:** 0..4294967295 .. attribute:: cookie Cookie **type**\: int **range:** 0..4294967295 .. attribute:: l3protocol Layer 3 protocol **type**\: int **range:** 0..4294967295 .. attribute:: l4protocol Layer 4 protocol **type**\: int **range:** 0..4294967295 .. 
attribute:: smask Filter operation **type**\: int **range:** 0..4294967295 .. attribute:: ifs Ifhandle **type**\: int **range:** 0..4294967295 .. attribute:: ptype Packet type **type**\: int **range:** 0..4294967295 .. attribute:: local_ip Local address **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? .. attribute:: remote_ip Remote address **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? .. attribute:: local_len Local address length **type**\: int **range:** 0..255 .. attribute:: remote_len Remote address length **type**\: int **range:** 0..255 .. attribute:: local_port Local port **type**\: int **range:** 0..65535 .. attribute:: remote_port Remote port **type**\: int **range:** 0..65535 .. attribute:: packet_misc L5 info **type**\: int **range:** 0..4294967295 .. attribute:: scope Scope **type**\: int **range:** 0..4294967295 .. attribute:: client_flags Client flags **type**\: int **range:** 0..4294967295 .. attribute:: min_ttl Minimum TTL **type**\: int **range:** 0..255 .. attribute:: lazy_bindq_delay lazy binding queue delay **type**\: int **range:** 0..4294967295 .. attribute:: ptq_delay pending transactions queue delay **type**\: int **range:** 0..4294967295 """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa.EntryXr.Entry, self).__init__() self.yang_name = "entry" self.yang_parent_name = "entry-xr" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['entry'] self._child_classes = OrderedDict([("ctime", ("ctime", LptsPa.EntryXr.Entry.Ctime)), ("utime", ("utime", LptsPa.EntryXr.Entry.Utime))]) self._leafs = OrderedDict([ ('entry', (YLeaf(YType.str, 'entry'), ['str'])), ('location', (YLeaf(YType.uint32, 'location'), ['int'])), ('client_id', (YLeaf(YType.uint32, 'client-id'), ['int'])), ('vid', (YLeaf(YType.uint32, 'vid'), ['int'])), ('cookie', (YLeaf(YType.uint32, 'cookie'), ['int'])), ('l3protocol', (YLeaf(YType.uint32, 'l3protocol'), ['int'])), ('l4protocol', (YLeaf(YType.uint32, 'l4protocol'), ['int'])), ('smask', (YLeaf(YType.uint32, 'smask'), ['int'])), ('ifs', (YLeaf(YType.uint32, 'ifs'), ['int'])), ('ptype', (YLeaf(YType.uint32, 'ptype'), ['int'])), ('local_ip', (YLeaf(YType.str, 'local-ip'), ['str'])), ('remote_ip', (YLeaf(YType.str, 'remote-ip'), ['str'])), ('local_len', (YLeaf(YType.uint8, 'local-len'), ['int'])), ('remote_len', (YLeaf(YType.uint8, 'remote-len'), ['int'])), ('local_port', (YLeaf(YType.uint16, 'local-port'), ['int'])), ('remote_port', (YLeaf(YType.uint16, 'remote-port'), ['int'])), ('packet_misc', (YLeaf(YType.uint32, 'packet-misc'), ['int'])), ('scope', (YLeaf(YType.uint32, 'scope'), ['int'])), ('client_flags', (YLeaf(YType.uint32, 'client-flags'), ['int'])), ('min_ttl', (YLeaf(YType.uint8, 'min-ttl'), ['int'])), ('lazy_bindq_delay', (YLeaf(YType.uint32, 'lazy-bindq-delay'), ['int'])), ('ptq_delay', (YLeaf(YType.uint32, 'ptq-delay'), ['int'])), ]) self.entry = None self.location = None self.client_id = None self.vid = None self.cookie = None self.l3protocol = None self.l4protocol = None self.smask = None self.ifs = None self.ptype = None self.local_ip = None self.remote_ip = None self.local_len = None self.remote_len = None self.local_port = None self.remote_port = None self.packet_misc = None self.scope = None self.client_flags = None self.min_ttl = None self.lazy_bindq_delay = None self.ptq_delay = None self.ctime = LptsPa.EntryXr.Entry.Ctime() self.ctime.parent = self self._children_name_map["ctime"] = "ctime" self.utime = 
LptsPa.EntryXr.Entry.Utime() self.utime.parent = self self._children_name_map["utime"] = "utime" self._segment_path = lambda: "entry" + "[entry='" + str(self.entry) + "']" self._absolute_path = lambda: "Cisco-IOS-XR-lpts-pa-oper:lpts-pa/entry-xr/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa.EntryXr.Entry, ['entry', u'location', u'client_id', u'vid', u'cookie', u'l3protocol', u'l4protocol', u'smask', u'ifs', u'ptype', u'local_ip', u'remote_ip', u'local_len', u'remote_len', u'local_port', u'remote_port', u'packet_misc', u'scope', u'client_flags', u'min_ttl', u'lazy_bindq_delay', u'ptq_delay'], name, value) class Ctime(Entity): """ Creation Time .. attribute:: tv_sec Time Sec **type**\: int **range:** 0..4294967295 .. attribute:: tv_nsec Time Nanosec **type**\: int **range:** 0..4294967295 """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa.EntryXr.Entry.Ctime, self).__init__() self.yang_name = "ctime" self.yang_parent_name = "entry" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('tv_sec', (YLeaf(YType.uint32, 'tv-sec'), ['int'])), ('tv_nsec', (YLeaf(YType.uint32, 'tv-nsec'), ['int'])), ]) self.tv_sec = None self.tv_nsec = None self._segment_path = lambda: "ctime" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa.EntryXr.Entry.Ctime, [u'tv_sec', u'tv_nsec'], name, value) class Utime(Entity): """ Update Time .. attribute:: tv_sec Time Sec **type**\: int **range:** 0..4294967295 .. attribute:: tv_nsec Time Nanosec **type**\: int **range:** 0..4294967295 """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa.EntryXr.Entry.Utime, self).__init__() self.yang_name = "utime" self.yang_parent_name = "entry" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('tv_sec', (YLeaf(YType.uint32, 'tv-sec'), ['int'])), ('tv_nsec', (YLeaf(YType.uint32, 'tv-nsec'), ['int'])), ]) self.tv_sec = None self.tv_nsec = None self._segment_path = lambda: "utime" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa.EntryXr.Entry.Utime, [u'tv_sec', u'tv_nsec'], name, value) class Entries(Entity): """ lpts pa clients .. attribute:: entry Data for single PA Client **type**\: list of :py:class:`Entry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pa_oper.LptsPa.Entries.Entry>` """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa.Entries, self).__init__() self.yang_name = "entries" self.yang_parent_name = "lpts-pa" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("entry", ("entry", LptsPa.Entries.Entry))]) self._leafs = OrderedDict() self.entry = YList(self) self._segment_path = lambda: "entries" self._absolute_path = lambda: "Cisco-IOS-XR-lpts-pa-oper:lpts-pa/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa.Entries, [], name, value) class Entry(Entity): """ Data for single PA Client .. attribute:: entry (key) Single Client entry **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ .. attribute:: flags Client flags **type**\: int **range:** 0..4294967295 .. attribute:: open_flags Open flags **type**\: int **range:** 0..4294967295 .. 
attribute:: location Rack/slot/instance **type**\: int **range:** 0..4294967295 .. attribute:: client_id Client ID **type**\: int **range:** 0..4294967295 .. attribute:: times Transaction statisitics **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? """ _prefix = 'lpts-pa-oper' _revision = '2015-11-09' def __init__(self): super(LptsPa.Entries.Entry, self).__init__() self.yang_name = "entry" self.yang_parent_name = "entries" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['entry'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('entry', (YLeaf(YType.str, 'entry'), ['str'])), ('flags', (YLeaf(YType.uint32, 'flags'), ['int'])), ('open_flags', (YLeaf(YType.uint32, 'open-flags'), ['int'])), ('location', (YLeaf(YType.uint32, 'location'), ['int'])), ('client_id', (YLeaf(YType.uint32, 'client-id'), ['int'])), ('times', (YLeaf(YType.str, 'times'), ['str'])), ]) self.entry = None self.flags = None self.open_flags = None self.location = None self.client_id = None self.times = None self._segment_path = lambda: "entry" + "[entry='" + str(self.entry) + "']" self._absolute_path = lambda: "Cisco-IOS-XR-lpts-pa-oper:lpts-pa/entries/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LptsPa.Entries.Entry, ['entry', u'flags', u'open_flags', u'location', u'client_id', u'times'], name, value) def clone_ptr(self): self._top_entity = LptsPa() return self._top_entity
the-stack_0_8295
# Copyright 2007-2010 by Peter Cock. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. from __future__ import print_function from Bio._py3k import basestring import os import warnings try: from StringIO import StringIO # Python 2 # Can't use cStringIO, quoting the documentation, # "Unlike the StringIO module, this module is not able to accept # Unicode strings that cannot be encoded as plain ASCII strings." # Therefore can't use from Bio._py3k import StringIO except ImportError: from io import StringIO # Python 3 from io import BytesIO from Bio import BiopythonWarning, BiopythonParserWarning from Bio import SeqIO from Bio import AlignIO from Bio.SeqRecord import SeqRecord from Bio.Seq import Seq, UnknownSeq from Bio import Alphabet from Bio.Align import MultipleSeqAlignment # TODO - Convert this to using unittest, and check desired warnings # are issued. Used to do that by capturing warnings to stdout and # verifying via the print-and-compare check. However, there was some # frustrating cross-platform inconsistency I couldn't resolve. protein_alphas = [Alphabet.generic_protein] dna_alphas = [Alphabet.generic_dna] rna_alphas = [Alphabet.generic_rna] nucleotide_alphas = [Alphabet.generic_nucleotide, Alphabet.Gapped(Alphabet.generic_nucleotide)] no_alpha_formats = ["fasta", "clustal", "phylip", "phylip-relaxed", "phylip-sequential", "tab", "ig", "stockholm", "emboss", "fastq", "fastq-solexa", "fastq-illumina", "qual"] possible_unknown_seq_formats = ["qual", "genbank", "gb", "embl", "imgt"] #List of formats including alignment only file formats we can read AND write. #The list is initially hard coded to preserve the original order of the unit #test output, with any new formats added since appended to the end. 
test_write_read_alignment_formats = ["fasta", "clustal", "phylip", "stockholm", "phylip-relaxed"] for format in sorted(SeqIO._FormatToWriter): if format not in test_write_read_alignment_formats: test_write_read_alignment_formats.append(format) for format in sorted(AlignIO._FormatToWriter): if format not in test_write_read_alignment_formats: test_write_read_alignment_formats.append(format) test_write_read_alignment_formats.remove("gb") # an alias for genbank test_write_read_alignment_formats.remove("fastq-sanger") # an alias for fastq # test_files is a list of tuples containing: # - string: file format # - boolean: alignment (requires all seqs be same length) # - string: relative filename # - integer: number of sequences test_files = [ ("sff", False, 'Roche/E3MFGYR02_random_10_reads.sff', 10), #Following examples are also used in test_Clustalw.py ("clustal", True, 'Clustalw/cw02.aln', 2), ("clustal", True, 'Clustalw/opuntia.aln', 7), ("clustal", True, 'Clustalw/hedgehog.aln', 5), ("clustal", True, 'Clustalw/odd_consensus.aln', 2), #Following nucleic examples are also used in test_SeqIO_FastaIO.py ("fasta", False, 'Fasta/lupine.nu', 1), ("fasta", False, 'Fasta/elderberry.nu', 1), ("fasta", False, 'Fasta/phlox.nu', 1), ("fasta", False, 'Fasta/centaurea.nu', 1), ("fasta", False, 'Fasta/wisteria.nu', 1), ("fasta", False, 'Fasta/sweetpea.nu', 1), ("fasta", False, 'Fasta/lavender.nu', 1), #Following protein examples are also used in test_SeqIO_FastaIO.py ("fasta", False, 'Fasta/aster.pro', 1), ("fasta", False, 'Fasta/loveliesbleeding.pro', 1), ("fasta", False, 'Fasta/rose.pro', 1), ("fasta", False, 'Fasta/rosemary.pro', 1), #Following examples are also used in test_BioSQL_SeqIO.py ("fasta", False, 'Fasta/f001', 1), # Protein ("fasta", False, 'Fasta/f002', 3), # DNA #("fasta", False, 'Fasta/f003', 2), # Protein with comments ("fasta", False, 'Fasta/fa01', 2), # Protein with gaps #Following are also used in test_SeqIO_features.py, see also NC_005816.gb ("fasta", False, 'GenBank/NC_005816.fna', 1), ("fasta", False, 'GenBank/NC_005816.ffn', 10), ("fasta", False, 'GenBank/NC_005816.faa', 10), ("fasta", False, 'GenBank/NC_000932.faa', 85), ("tab", False, 'GenBank/NC_005816.tsv', 10), # FASTA -> Tabbed #Following examples are also used in test_GFF.py ("fasta", False, 'GFF/NC_001802.fna', 1), # upper case ("fasta", False, 'GFF/NC_001802lc.fna', 1), # lower case ("fasta", True, 'GFF/multi.fna', 3), # Trivial nucleotide alignment #Following example is also used in test_registry.py ("fasta", False, 'Registry/seqs.fasta', 2), # contains blank line #Following example is also used in test_Nexus.py ("nexus", True, 'Nexus/test_Nexus_input.nex', 9), #Following examples are also used in test_SwissProt.py ("swiss", False, 'SwissProt/sp001', 1), ("swiss", False, 'SwissProt/sp002', 1), ("swiss", False, 'SwissProt/sp003', 1), ("swiss", False, 'SwissProt/sp004', 1), ("swiss", False, 'SwissProt/sp005', 1), ("swiss", False, 'SwissProt/sp006', 1), ("swiss", False, 'SwissProt/sp007', 1), ("swiss", False, 'SwissProt/sp008', 1), ("swiss", False, 'SwissProt/sp009', 1), ("swiss", False, 'SwissProt/sp010', 1), ("swiss", False, 'SwissProt/sp011', 1), ("swiss", False, 'SwissProt/sp012', 1), ("swiss", False, 'SwissProt/sp013', 1), ("swiss", False, 'SwissProt/sp014', 1), ("swiss", False, 'SwissProt/sp015', 1), ("swiss", False, 'SwissProt/sp016', 1), #Following example is also used in test_registry.py ("swiss", False, 'Registry/EDD_RAT.dat', 1), #Following examples are also used in test_Uniprot.py ("uniprot-xml", False, 
'SwissProt/uni001', 1), ("uniprot-xml", False, 'SwissProt/uni002', 3), ("uniprot-xml", False, 'SwissProt/Q13639.xml', 1), ("swiss", False, 'SwissProt/Q13639.txt', 1), #Following examples are also used in test_GenBank.py ("genbank", False, 'GenBank/noref.gb', 1), ("genbank", False, 'GenBank/cor6_6.gb', 6), ("genbank", False, 'GenBank/iro.gb', 1), ("genbank", False, 'GenBank/pri1.gb', 1), ("genbank", False, 'GenBank/arab1.gb', 1), ("genbank", False, 'GenBank/protein_refseq.gb', 1), # Old version ("genbank", False, 'GenBank/protein_refseq2.gb', 1), # Revised version ("genbank", False, 'GenBank/extra_keywords.gb', 1), ("genbank", False, 'GenBank/one_of.gb', 1), ("genbank", False, 'GenBank/NT_019265.gb', 1), # contig, no sequence ("genbank", False, 'GenBank/origin_line.gb', 1), ("genbank", False, 'GenBank/blank_seq.gb', 1), ("genbank", False, 'GenBank/dbsource_wrap.gb', 1), ("genbank", False, 'GenBank/NC_005816.gb', 1), # See also AE017046.embl ("genbank", False, 'GenBank/NC_000932.gb', 1), ("genbank", False, 'GenBank/pBAD30.gb', 1), # Odd LOCUS line from Vector NTI # The next example is a truncated copy of gbvrl1.seq from # ftp://ftp.ncbi.nih.gov/genbank/gbvrl1.seq.gz # This includes an NCBI header, and the first three records: ("genbank", False, 'GenBank/gbvrl1_start.seq', 3), #Following files are also used in test_GFF.py ("genbank", False, 'GFF/NC_001422.gbk', 1), #Generated with Entrez.efetch("protein", id="16130152", rettype="gbwithparts") ("genbank", False, 'GenBank/NP_416719.gbwithparts', 1), #GenPept file with nasty bond locations, ("genbank", False, 'GenBank/1MRR_A.gp', 1), #Following files are currently only used here or in test_SeqIO_index.py: ("embl", False, 'EMBL/epo_prt_selection.embl', 9), # proteins ("embl", False, 'EMBL/patents.embl', 4), # more proteins, but no seq ("embl", False, 'EMBL/TRBG361.embl', 1), ("embl", False, 'EMBL/DD231055_edited.embl', 1), ("embl", False, 'EMBL/DD231055_edited2.embl', 1), #Partial ID line ("embl", False, 'EMBL/SC10H5.embl', 1), # Pre 2006 style ID line ("embl", False, 'EMBL/U87107.embl', 1), # Old ID line with SV line ("embl", False, 'EMBL/AAA03323.embl', 1), # 2008, PA line but no AC ("embl", False, 'EMBL/AE017046.embl', 1), # See also NC_005816.gb ("embl", False, 'EMBL/Human_contigs.embl', 2), # contigs, no sequences ("embl", False, 'EMBL/location_wrap.embl', 1), # wrapped locations and unspecified type ("embl", False, 'EMBL/A04195.imgt', 1), # features over indented for EMBL ("imgt", False, 'EMBL/A04195.imgt', 1), # features over indented for EMBL ("stockholm", True, 'Stockholm/simple.sth', 2), ("stockholm", True, 'Stockholm/funny.sth', 6), #Following PHYLIP files are currently only used here and in test_AlignIO.py, #and are mostly from Joseph Felsenstein's PHYLIP v3.6 documentation: ("phylip", True, 'Phylip/reference_dna.phy', 6), ("phylip", True, 'Phylip/reference_dna2.phy', 6), ("phylip", True, 'Phylip/hennigian.phy', 10), ("phylip", True, 'Phylip/horses.phy', 10), ("phylip", True, 'Phylip/random.phy', 10), ("phylip", True, 'Phylip/interlaced.phy', 3), ("phylip", True, 'Phylip/interlaced2.phy', 4), #Following are EMBOSS simple or pairs format alignments ("emboss", True, 'Emboss/alignret.txt', 4), ("emboss", False, 'Emboss/needle.txt', 10), ("emboss", True, 'Emboss/water.txt', 2), #Following PHD (PHRAP) sequencing files are also used in test_Phd.py ("phd", False, 'Phd/phd1', 3), ("phd", False, 'Phd/phd2', 1), ("phd", False, 'Phd/phd_solexa', 2), ("phd", False, 'Phd/phd_454', 1), #Following ACE assembly files are also used in test_Ace.py 
("ace", False, 'Ace/contig1.ace', 2), ("ace", False, 'Ace/consed_sample.ace', 1), ("ace", False, 'Ace/seq.cap.ace', 1), #Following IntelliGenetics / MASE files are also used in test_intelligenetics.py ("ig", False, 'IntelliGenetics/TAT_mase_nuc.txt', 17), ("ig", True, 'IntelliGenetics/VIF_mase-pro.txt', 16), #This next file is a MASE alignment but sequence O_ANT70 is shorter than #the others (so as an alignment will fail). Perhaps MASE doesn't #write trailing gaps? ("ig", False, 'IntelliGenetics/vpu_nucaligned.txt', 9), #Following NBRD-PIR files are used in test_nbrf.py ("pir", False, 'NBRF/B_nuc.pir', 444), ("pir", False, 'NBRF/Cw_prot.pir', 111), ("pir", False, 'NBRF/DMA_nuc.pir', 4), ("pir", False, 'NBRF/DMB_prot.pir', 6), ("pir", True, 'NBRF/clustalw.pir', 2), #Following quality files are also used in the Bio.SeqIO.QualityIO doctests: ("fasta", True, 'Quality/example.fasta', 3), ("qual", False, 'Quality/example.qual', 3), ("fastq", True, 'Quality/example.fastq', 3), #Unix new lines ("fastq", True, 'Quality/example_dos.fastq', 3), #DOS/Windows new lines ("fastq", True, 'Quality/tricky.fastq', 4), ("fastq", False, 'Quality/sanger_faked.fastq', 1), ("fastq", False, 'Quality/sanger_93.fastq', 1), ("fastq-illumina", False, 'Quality/illumina_faked.fastq', 1), ("fastq-solexa", False, 'Quality/solexa_faked.fastq', 1), ("fastq-solexa", True, 'Quality/solexa_example.fastq', 5), #Following examples are also used in test_SeqXML.py ("seqxml", False, 'SeqXML/dna_example.xml', 4), ("seqxml", False, 'SeqXML/rna_example.xml', 5), ("seqxml", False, 'SeqXML/protein_example.xml', 5), #Following examples are also used in test_SeqIO_AbiIO.py ("abi", False, 'Abi/310.ab1', 1), ("abi", False, 'Abi/3100.ab1', 1), ("abi", False, 'Abi/3730.ab1', 1), ] class ForwardOnlyHandle(object): """Mimic a network handle without seek and tell methods etc.""" def __init__(self, handle): self._handle = handle def __iter__(self): return iter(self._handle) def read(self, length=None): if length is None: return self._handle.read() else: return self._handle.read(length) def readline(self): return self._handle.readline() def close(self): return self._handle.close() def compare_record(record_one, record_two): """This is meant to be a strict comparison for exact agreement...""" assert isinstance(record_one, SeqRecord) assert isinstance(record_two, SeqRecord) assert record_one.seq is not None assert record_two.seq is not None if record_one.id != record_two.id: return False if record_one.name != record_two.name: return False if record_one.description != record_two.description: return False if len(record_one) != len(record_two): return False if isinstance(record_one.seq, UnknownSeq) \ and isinstance(record_two.seq, UnknownSeq): #Jython didn't like us comparing the string of very long UnknownSeq #object (out of heap memory error) if record_one.seq._character != record_two.seq._character: return False elif str(record_one.seq) != str(record_two.seq): return False #TODO - check features and annotation (see code for BioSQL tests) for key in set(record_one.letter_annotations).intersection( record_two.letter_annotations): if record_one.letter_annotations[key] != \ record_two.letter_annotations[key]: return False return True def record_summary(record, indent=" "): """Returns a concise summary of a SeqRecord object as a string""" if record.id == record.name: answer = "%sID and Name='%s',\n%sSeq='" % (indent, record.id, indent) else: answer = "%sID = '%s', Name='%s',\n%sSeq='" % (indent, record.id, record.name, indent) if record.seq is None: 
answer += "None" else: if len(record.seq) > 50: answer += str(record.seq[:40]) + "..." + str(record.seq[-7:]) else: answer += str(record.seq) answer += "', length=%i" % (len(record.seq)) return answer def col_summary(col_text): if len(col_text) < 65: return col_text else: return col_text[:60] + "..." + col_text[-5:] def alignment_summary(alignment, index=" "): """Returns a concise summary of an Alignment object as a string""" answer = [] alignment_len = alignment.get_alignment_length() rec_count = len(alignment) for i in range(min(5, alignment_len)): answer.append(index + col_summary(alignment[:, i]) + " alignment column %i" % i) if alignment_len > 5: i = alignment_len - 1 answer.append(index + col_summary("|" * rec_count) + " ...") answer.append(index + col_summary(alignment[:, i]) + " alignment column %i" % i) return "\n".join(answer) def check_simple_write_read(records, indent=" "): #print(indent+"Checking we can write and then read back these records") for format in test_write_read_alignment_formats: if format not in possible_unknown_seq_formats \ and isinstance(records[0].seq, UnknownSeq) \ and len(records[0].seq) > 100: #Skipping for speed. Some of the unknown sequences are #rather long, and it seems a bit pointless to record them. continue print(indent+"Checking can write/read as '%s' format" % format) #Going to write to a handle... if format in SeqIO._BinaryFormats: handle = BytesIO() else: handle = StringIO() try: with warnings.catch_warnings(): #e.g. data loss warnings.simplefilter("ignore", BiopythonWarning) c = SeqIO.write(sequences=records, handle=handle, format=format) assert c == len(records) except (TypeError, ValueError) as e: #This is often expected to happen, for example when we try and #write sequences of different lengths to an alignment file. if "len()" in str(e): #Python 2.4.3, #>>> len(None) #... #TypeError: len() of unsized object # #Python 2.5.2, #>>> len(None) #... #TypeError: object of type 'NoneType' has no len() print("Failed: Probably len() of None") else: print(indent+"Failed: %s" % str(e)) if records[0].seq.alphabet.letters is not None: assert format != t_format, \ "Should be able to re-write in the original format!" #Carry on to the next format: continue handle.flush() handle.seek(0) #Now ready to read back from the handle... try: records2 = list(SeqIO.parse(handle=handle, format=format)) except ValueError as e: #This is BAD. We can't read our own output. #I want to see the output when called from the test harness, #run_tests.py (which can be funny about new lines on Windows) handle.seek(0) raise ValueError("%s\n\n%s\n\n%s" % (str(e), repr(handle.read()), repr(records))) assert len(records2) == t_count for r1, r2 in zip(records, records2): #Check the bare minimum (ID and sequence) as #many formats can't store more than that. assert len(r1) == len(r2) #Check the sequence if format in ["gb", "genbank", "embl", "imgt"]: #The GenBank/EMBL parsers will convert to upper case. if isinstance(r1.seq, UnknownSeq) \ and isinstance(r2.seq, UnknownSeq): #Jython didn't like us comparing the string of very long #UnknownSeq object (out of heap memory error) assert r1.seq._character.upper() == r2.seq._character else: assert str(r1.seq).upper() == str(r2.seq) elif format == "qual": assert isinstance(r2.seq, UnknownSeq) assert len(r2) == len(r1) else: assert str(r1.seq) == str(r2.seq) #Beware of different quirks and limitations in the #valid character sets and the identifier lengths! 
if format in ["phylip", "phylip-sequential"]: assert r1.id.replace("[", "").replace("]", "")[:10] == r2.id, \ "'%s' vs '%s'" % (r1.id, r2.id) elif format=="phylip-relaxed": assert r1.id.replace(" ", "").replace(':', '|') == r2.id, \ "'%s' vs '%s'" % (r1.id, r2.id) elif format=="clustal": assert r1.id.replace(" ", "_")[:30] == r2.id, \ "'%s' vs '%s'" % (r1.id, r2.id) elif format=="stockholm": assert r1.id.replace(" ", "_") == r2.id, \ "'%s' vs '%s'" % (r1.id, r2.id) elif format=="fasta": assert r1.id.split()[0] == r2.id else: assert r1.id == r2.id, \ "'%s' vs '%s'" % (r1.id, r2.id) if len(records)>1: #Try writing just one record (passing a SeqRecord, not a list) if format in SeqIO._BinaryFormats: handle = BytesIO() else: handle = StringIO() SeqIO.write(records[0], handle, format) assert handle.getvalue() == records[0].format(format) #Check parsers can cope with an empty file for t_format in SeqIO._FormatToIterator: if t_format in SeqIO._BinaryFormats or \ t_format in ("uniprot-xml", "pdb-seqres", "pdb-atom"): #Not allowed empty SFF files. continue handle = StringIO() records = list(SeqIO.parse(handle, t_format)) assert len(records) == 0 for (t_format, t_alignment, t_filename, t_count) in test_files: if t_format in SeqIO._BinaryFormats: mode = "rb" else: mode = "r" print("Testing reading %s format file %s" % (t_format, t_filename)) assert os.path.isfile(t_filename), t_filename with warnings.catch_warnings(): # e.g. BiopythonParserWarning: Dropping bond qualifier in feature location warnings.simplefilter("ignore", BiopythonParserWarning) #Try as an iterator using handle h = open(t_filename, mode) records = list(SeqIO.parse(handle=h, format=t_format)) h.close() assert len(records) == t_count, \ "Found %i records but expected %i" % (len(records), t_count) #Try using the iterator with a for loop, and a filename not handle records2 = [] for record in SeqIO.parse(t_filename, format=t_format): records2.append(record) assert len(records2) == t_count #Try using the iterator with the next() method records3 = [] h = open(t_filename, mode) seq_iterator = SeqIO.parse(handle=h, format=t_format) while True: try: record = next(seq_iterator) except StopIteration: break assert record is not None, "Should raise StopIteration not return None" records3.append(record) h.close() #Try a mixture of next() and list (a torture test!) h = open(t_filename, mode) seq_iterator = SeqIO.parse(handle=h, format=t_format) try: record = next(seq_iterator) except StopIteration: record = None if record is not None: records4 = [record] records4.extend(list(seq_iterator)) else: records4 = [] assert len(records4) == t_count h.close() #Try a mixture of next() and for loop (a torture test!) 
#with a forward-only-handle if t_format == "abi": #Temp hack h = open(t_filename, mode) else: h = ForwardOnlyHandle(open(t_filename, mode)) seq_iterator = SeqIO.parse(h, format=t_format) try: record = next(seq_iterator) except StopIteration: record = None if record is not None: records5 = [record] for record in seq_iterator: records5.append(record) else: records5 = [] assert len(records5) == t_count h.close() for i in range(t_count): record = records[i] #Check returned expected object type assert isinstance(record, SeqRecord) if t_format in possible_unknown_seq_formats: assert isinstance(record.seq, Seq) or \ isinstance(record.seq, UnknownSeq) else: assert isinstance(record.seq, Seq) assert isinstance(record.id, basestring) assert isinstance(record.name, basestring) assert isinstance(record.description, basestring) assert record.id != "" if "accessions" in record.annotations: accs = record.annotations["accessions"] #Check for blanks, or entries with leading/trailing spaces for acc in accs: assert acc and acc == acc.strip(), \ "Bad accession in annotations: %s" % repr(acc) assert len(set(accs)) == len(accs), \ "Repeated accession in annotations: %s" % repr(accs) for ref in record.dbxrefs: assert ref and ref == ref.strip(), \ "Bad cross reference in dbxrefs: %s" % repr(ref) assert len(record.dbxrefs) == len(record.dbxrefs), \ "Repeated cross reference in dbxrefs: %s" % repr(record.dbxrefs) #Check the lists obtained by the different methods agree assert compare_record(record, records2[i]) assert compare_record(record, records3[i]) assert compare_record(record, records4[i]) assert compare_record(record, records5[i]) if i < 3: print(record_summary(record)) # Only printed the only first three records: 0,1,2 if t_count > 4: print(" ...") if t_count > 3: print(record_summary(records[-1])) # Check Bio.SeqIO.read(...) if t_count == 1: record = SeqIO.read(t_filename, format=t_format) assert isinstance(record, SeqRecord) else: try: record = SeqIO.read(t_filename, t_format) assert False, "Bio.SeqIO.read(...) should have failed" except ValueError: #Expected to fail pass # Check alphabets for record in records: base_alpha = Alphabet._get_base_alphabet(record.seq.alphabet) if isinstance(base_alpha, Alphabet.SingleLetterAlphabet): if t_format in no_alpha_formats: assert base_alpha == Alphabet.single_letter_alphabet # Too harsh? else: base_alpha = None if base_alpha is None: good = [] bad =[] given_alpha=None elif isinstance(base_alpha, Alphabet.ProteinAlphabet): good = protein_alphas bad = dna_alphas + rna_alphas + nucleotide_alphas elif isinstance(base_alpha, Alphabet.RNAAlphabet): good = nucleotide_alphas + rna_alphas bad = protein_alphas + dna_alphas elif isinstance(base_alpha, Alphabet.DNAAlphabet): good = nucleotide_alphas + dna_alphas bad = protein_alphas + rna_alphas elif isinstance(base_alpha, Alphabet.NucleotideAlphabet): good = nucleotide_alphas bad = protein_alphas else: assert t_format in no_alpha_formats, "Got %s from %s file" \ % (repr(base_alpha), t_format) good = protein_alphas + dna_alphas + rna_alphas + nucleotide_alphas bad = [] for given_alpha in good: #These should all work... 
            given_base = Alphabet._get_base_alphabet(given_alpha)
            for record in SeqIO.parse(t_filename, t_format, given_alpha):
                base_alpha = Alphabet._get_base_alphabet(record.seq.alphabet)
                assert isinstance(base_alpha, given_base.__class__)
                assert base_alpha == given_base
            if t_count == 1:
                h = open(t_filename, mode)
                record = SeqIO.read(h, t_format, given_alpha)
                h.close()
                base_alpha = Alphabet._get_base_alphabet(record.seq.alphabet)
                assert isinstance(base_alpha, given_base.__class__)
                assert base_alpha == given_base
        for given_alpha in bad:
            #These should all fail...
            h = open(t_filename, mode)
            try:
                print(next(SeqIO.parse(h, t_format, given_alpha)))
                h.close()
                assert False, "Forcing wrong alphabet, %s, should fail (%s)" \
                    % (repr(given_alpha), t_filename)
            except ValueError:
                #Good - should fail
                pass
            h.close()
        del good, bad, given_alpha, base_alpha

        if t_alignment:
            print("Testing reading %s format file %s as an alignment" \
                % (t_format, t_filename))
            alignment = MultipleSeqAlignment(SeqIO.parse(
                handle=t_filename, format=t_format))
            assert len(alignment) == t_count
            alignment_len = alignment.get_alignment_length()
            #Check the record order agrees, and double check the
            #sequence lengths all agree too.
            for i in range(t_count):
                assert compare_record(records[i], alignment[i])
                assert len(records[i].seq) == alignment_len
            print(alignment_summary(alignment))

        #Some alignment file formats have magic characters which mean
        #use the letter in this position in the first sequence.
        #They should all have been converted by the parser, but if
        #not reversing the record order might expose an error.  Maybe.
        records.reverse()
        check_simple_write_read(records)

print("Finished testing reading files")
the-stack_0_8297
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare two dotted version strings.

        Returns -1 if version1 < version2, 1 if version1 > version2, else 0.
        """
        # Split each version into its numeric revisions ("1.01" -> [1, 1]).
        l1 = [int(s) for s in version1.split(".")]
        l2 = [int(s) for s in version2.split(".")]
        # Pad the shorter list with zeros so trailing ".0" revisions compare equal.
        len1, len2 = len(l1), len(l2)
        if len1 > len2:
            l2 += [0] * (len1 - len2)
        elif len1 < len2:
            l1 += [0] * (len2 - len1)
        # Lexicographic list comparison; (a > b) - (a < b) maps to 1, 0 or -1.
        return (l1 > l2) - (l1 < l2)
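
# Quick usage sketch (not part of the original solution): the class and method
# above are real, but the inputs below are illustrative assumptions chosen to
# exercise the zero-padding logic.
if __name__ == "__main__":
    s = Solution()
    print(s.compareVersion("1.01", "1.001"))  # 0  -> "01" and "001" both parse to 1
    print(s.compareVersion("1.0", "1.0.0"))   # 0  -> missing revisions are padded with 0
    print(s.compareVersion("0.1", "1.1"))     # -1 -> 0 < 1 in the first revision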
the-stack_0_8298
import unittest from unittest import mock from datetime import datetime,timedelta from shutil import rmtree import os import json import dotenv # project modules from logs.config.logging import logs_config from locations import paths, dirs, root_dir, test_dir from modules.email import email_notification, login_to_gmail_and_send # LOGGING logs_config(paths["logs_config_test"]) # ENV VARS dotenv.load_dotenv(root_dir / ".dev.env") # MOCK VARS mock_dirs = { "payload_email": test_dir / "fixtures/payload_email/", "email_template": dirs["email_template"], "email_final": test_dir / "output/email_final/", "payload_csv": test_dir / "fixtures/payload_csv/", } mock_paths = { "payload_email": mock_dirs["payload_email"] / "email-homicide1.html", "email_final": mock_dirs["email_final"] / "email.html", "payload_csv": mock_dirs["payload_csv"] / "dockets_murder_and_hom.csv", } class TestEmailHomicideAndMurder(unittest.TestCase): def setUp(self) -> None: # clean up if mock_dirs["email_final"].is_dir(): rmtree(mock_dirs["email_final"]) mock_dirs["email_final"].mkdir(parents=True, exist_ok=True) # vars self.scrape_start_datetime = datetime.now() - timedelta(hours=1) self.scrape_end_datetime = datetime.now() self.target_scrape_day = "yesterday" self.county_list = ["Cumberland", "Perry", "York", "Lancaster"] def tearDown(self) -> None: pass @mock.patch.dict(paths, mock_paths, clear=True) @mock.patch.dict(dirs, mock_dirs, clear=True) def test_email_with_homicide_and_murder_sends(self): """ Test that email notification successfully detects that a homicide and murder is included in CSV payload and responds accordingly. """ email_notification( self.scrape_start_datetime, self.scrape_end_datetime, self.target_scrape_day, self.county_list ) if __name__ == "__main__": unittest.main()
the-stack_0_8299
import os import json import logging def load_mock_data(filename): base_dir = os.path.dirname(os.path.abspath(__file__)) resource_file = os.path.join(base_dir, 'test_data/%s' % filename) json_text = '[]' try: with open(resource_file, 'r') as f: json_text = f.read() except IOError: logging.exception('could not load file %s' % filename) return json.loads(json_text)
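
# Minimal usage sketch (added for illustration): 'users.json' is a hypothetical
# fixture name; any JSON file placed under test_data/ next to this module works.
# If the file is missing, load_mock_data logs the error and returns an empty list.
if __name__ == "__main__":
    mock_users = load_mock_data('users.json')
    print('loaded %d records' % len(mock_users))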
the-stack_0_8300
import onnx from onnx import helper from onnx import TensorProto from onnx import OperatorSetIdProto import itertools onnxdomain = OperatorSetIdProto() onnxdomain.version = 12 # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification. onnxdomain.domain = "" msdomain = OperatorSetIdProto() msdomain.version = 1 msdomain.domain = "com.microsoft" opsets = [onnxdomain, msdomain] # expect type to be either TensorProto.FLOAT or TensorProto.FLOAT16 def type_to_string(type): return "float" if type ==TensorProto.FLOAT else "float16" def save(model_path, nodes, inputs, outputs, initializers): graph = helper.make_graph( nodes, "CastPropagateTest", inputs, outputs, initializers) model = helper.make_model( graph, opset_imports=opsets, producer_name="onnxruntime-test") onnx.save(model, model_path + ".onnx") def gen_fuse_back2back_casts(model_path): for (type1, type2) in list(itertools.product([TensorProto.FLOAT, TensorProto.FLOAT16], repeat=2)): nodes = [ helper.make_node( "MatMul", ["input_0", "input_1"], ["product"], "MatMul_0"), helper.make_node( "Cast", ["product"], ["product_cast"], "Cast_0", to = type1), helper.make_node( "Cast", ["product_cast"], ["output"], "Cast_1", to = type2) ] input_type = type2 if type1 != type2 else (TensorProto.FLOAT16 if type1 == TensorProto.FLOAT else TensorProto.FLOAT) output_type = input_type if type1 != type2 else (TensorProto.FLOAT16 if input_type == TensorProto.FLOAT else TensorProto.FLOAT) inputs = [ helper.make_tensor_value_info( "input_0", input_type, ['M', 'K']), helper.make_tensor_value_info( "input_1", input_type, ['K', 'N']) ] outputs = [ helper.make_tensor_value_info( "output", output_type, ['M', 'N']), ] save(model_path + "_" + type_to_string(type1) + "_" + type_to_string(type2), nodes, inputs, outputs, []) def gen_fuse_sibling_casts(model_path): for (type1, type2) in list(itertools.product([TensorProto.FLOAT, TensorProto.FLOAT16], repeat=2)): input_type = type2 if type1 != type2 else (TensorProto.FLOAT16 if type1 == TensorProto.FLOAT else TensorProto.FLOAT) nodes = [ helper.make_node( "MatMul", ["input_0", "input_1"], ["product"], "MatMul_0"), helper.make_node( "Cast", ["product"], ["cast_0_output"], "Cast_0", to = type1), helper.make_node( "Identity", ["cast_0_output"], ["output_0"], "Identity_0"), helper.make_node( "Cast", ["product"], ["cast_1_output"], "Cast_1", to = type2), helper.make_node( "Identity", ["cast_1_output"], ["output_1"], "Identity_1") ] inputs = [ helper.make_tensor_value_info( "input_0", input_type, ['M', 'K']), helper.make_tensor_value_info( "input_1", input_type, ['K', 'N']) ] outputs = [ helper.make_tensor_value_info( "output_0", type1, ['M', 'N']), helper.make_tensor_value_info( "output_1", type2, ['M', 'N']) ] save(model_path + "_" + type_to_string(type1) + "_" + type_to_string(type2), nodes, inputs, outputs, []) def flip_type(flip, type): return (TensorProto.FLOAT16 if type == TensorProto.FLOAT else TensorProto.FLOAT) if flip else type def do_cast_inputs(input_0, input_1, nodes): input_cast_type = TensorProto.FLOAT nodes.extend([helper.make_node( "Cast", [input_0], ["cast_"+input_0], "Cast_0", to = input_cast_type), helper.make_node( "Cast", [input_1], ["cast_"+input_1], "Cast_1", to = input_cast_type)]) return "cast_"+input_0, "cast_"+input_1 def do_transpose_inputs(input_0, input_1, nodes): nodes.extend([helper.make_node("Transpose", [input_0], ["input_transpose_0"], "Transpose_0"), helper.make_node("Transpose", [input_1], ["input_transpose_1"], "Transpose_1")]) 
return "input_transpose_0", "input_transpose_1" def do_cast_product(product, nodes): nodes.insert(1,helper.make_node( "Cast", [product], ["product_cast"], "Cast_2", to = TensorProto.FLOAT16)) return "product_cast" def do_transpose_product(product, nodes): if transpose_product: nodes.append(helper.make_node("Transpose", [product], ["product_transpose"], "Transpose_2")) return "product_transpose" def do_cast_sum(sum, nodes, type): nodes.append(helper.make_node( "Cast", [sum], ["cast_sum"], "Cast_3", to = type)) return "cast_sum" def do_cast_input2(input_2, nodes, type): nodes.append(helper.make_node( "Cast", [input_2], ["cast_"+input_2], "Cast_4", to = type)) return "cast_"+input_2 def gen_propagate_cast_test_model(model_path, transpose_inputs, transpose_product, cast_inputs, cast_product, insert_add, cast_sum, cast_input2, transpose_inputs_before_cast=False): input_0 = "input_0" input_1 = "input_1" product = "product" nodes = [] if transpose_inputs_before_cast: if transpose_inputs: input_0, input_1 = do_transpose_inputs(input_0, input_1, nodes) if cast_inputs: input_0, input_1 = do_cast_inputs(input_0, input_1, nodes) else: if cast_inputs: input_0, input_1 = do_cast_inputs(input_0, input_1, nodes) if transpose_inputs: input_0, input_1 = do_transpose_inputs(input_0, input_1, nodes) nodes.append(helper.make_node( "MatMul", [input_0, input_1], [product], "MatMul_0") ) if transpose_product: product = do_transpose_product(product, nodes) if cast_product: product = do_cast_product(product, nodes) output = product input_type = TensorProto.FLOAT16 if cast_inputs else TensorProto.FLOAT output_type = flip_type(cast_sum, flip_type(cast_product, flip_type(cast_inputs, input_type))) inputs = [ helper.make_tensor_value_info( "input_0", input_type, ['N', 'N']), helper.make_tensor_value_info( "input_1", input_type, ['N', 'N']) ] if insert_add: input_2 = "input_2" add_input_type = flip_type(True, input_type) if cast_inputs != cast_product else input_type add_input_type = flip_type(cast_input2, add_input_type) inputs.append(helper.make_tensor_value_info(input_2, add_input_type, ['N', 'N'])) add_output = "sum" if cast_input2: input_2 = do_cast_input2(input_2, nodes, flip_type(True, add_input_type)) nodes.append(helper.make_node("Add", [product, input_2], [add_output], "Add_0")) if cast_sum: add_output = do_cast_sum(add_output, nodes, flip_type(not cast_input2, add_input_type)) output = add_output outputs = [ helper.make_tensor_value_info( output, output_type, ['N', 'N']) ] save(model_path + ("_transpose_inputs" if transpose_inputs else "") + ("_transpose_product" if transpose_product else "") + ("_cast_inputs" if cast_inputs else "") + ("_cast_product" if cast_product else "") + ("_cast_input2" if cast_input2 else "") + ("_cast_sum" if cast_sum else ""), nodes, inputs, outputs, []) def gen_matmul_two_products(model_path, transpose, transpose_before_cast, second_matmul): def do_transpose(output_0, output_1, transpose, nodes): nodes.append(helper.make_node("Transpose", [output_0], ["transpose_0_"+output_0], "Transpose_0")) output_0 = "transpose_0_"+output_0 if transpose > 1: nodes.append(helper.make_node("Transpose", [output_1], ["transpose_1_"+output_1], "Transpose_1")) output_1 ="transpose_1_"+output_1 return output_0, output_1 input_type = TensorProto.FLOAT input_0 = "input_0" input_1 = "input_1" output = "product" output_0 = "product" output_1 = "product" inputs = [ helper.make_tensor_value_info( input_0, input_type, ['M', 'K']), helper.make_tensor_value_info( input_1, input_type, ['K', 'N']) ] outputs = [] 
nodes = [ helper.make_node( "MatMul", [input_0, input_1], [output], "MatMul_0")] if second_matmul: nodes.append(helper.make_node( "MatMul", [input_0, input_1], ["second_"+output], "MatMul_1")) outputs.append(helper.make_tensor_value_info( "second_"+output, input_type, ['M', 'N'])) if add_products: nodes.append(helper.make_node( "Add", [output, "second_"+output], ["sum"], "Add_0")) outputs.append(helper.make_tensor_value_info( "sum", input_type, ['M', 'N'])) if transpose > 0 and transpose_before_cast: output_0, output_1 = do_transpose(output_0, output_1, transpose, nodes) nodes.append(helper.make_node( "Cast", [output_0], ["cast_0_"+output_0], "Cast_0", to = TensorProto.FLOAT16)) output_0 = "cast_0_"+output_0 if second_matmul: nodes.append(helper.make_node( "Cast", [output_1], ["cast_1_"+output_1], "Cast_1", to = TensorProto.FLOAT16)) output_1 = "cast_1_"+output_1 if transpose > 0 and not transpose_before_cast: output_0, output_1 = do_transpose(output_0, output_1, transpose, nodes) outputs.extend([ helper.make_tensor_value_info( output_0, flip_type(True, input_type), ['M', 'N']), helper.make_tensor_value_info( output_1, flip_type(second_matmul, input_type), ['M', 'N']) ]) model_path += ("_transpose_before_cast" if transpose_before_cast else "_transpose_after_cast") if transpose > 0 else "" model_path += "_transpose" if transpose > 1 else "" model_path += "_second_matmul" if second_matmul else "" model_path += "_add_products" if add_products else "" save(model_path, nodes, inputs, outputs, []) for (transpose_inputs, transpose_product, cast_inputs, cast_product, insert_add, cast_sum, cast_input2) in list(itertools.product([False, True], repeat=7)): if not insert_add and (cast_sum or cast_input2): continue if cast_inputs or cast_product or cast_sum: gen_propagate_cast_test_model("matmul_add" if insert_add else "matmul", transpose_inputs, transpose_product, cast_inputs, cast_product, insert_add, cast_sum, cast_input2) gen_fuse_sibling_casts("fuse_sibling_casts") gen_fuse_back2back_casts("fuse_back2back_casts") for (transpose, transpose_before_cast, second_matmul, add_products) in list(itertools.product([0,1,2], [False, True], [False, True], [False, True])): if not transpose and transpose_before_cast: continue if not second_matmul and add_products: continue gen_matmul_two_products("matmul_two_outputs", transpose, transpose_before_cast, second_matmul)
the-stack_0_8301
import os import re from . import utils SASS_IMPORT_RE = re.compile(r"""@import\s+['"](.+?(?:\.s[ca]ss)?)['"]\s*;""") def _read_sass_imports(file): deps = [] with open(file) as f: sassfile = f.read() imports = SASS_IMPORT_RE.findall(sassfile) sass_dir = os.path.dirname(file) for imp in imports: dep = utils.resolve_possible_paths(imp, sass_dir, ['.scss', '.sass', '.css'], leading_underscore=True) if dep: deps.append(dep) else: raise ValueError('Invalid SASS import in {}: {}'.format(file, imp)) return deps def sass_dependencies(input_file): return utils.breadth_first_search(_read_sass_imports, input_file) def sass_compile(input_file, output_file, release=False): map_file = output_file + '.map' output_style = 'compressed' if release else 'expanded' source_map = '--source-comments --source-map-embed --source-map-contents --source-map {}'.format(map_file) if not release else '' cmdline = [ utils.get_node_bin_path('node-sass', 'bin', 'node-sass'), '--output-style', output_style, source_map, input_file, output_file, ] try: utils.ensure_deleted(map_file) utils.run_command(cmdline, 'Failed to compile SASS to "{}"'.format(output_file)) except: utils.ensure_deleted(output_file) raise finally: utils.ensure_deleted(map_file)
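
# Usage sketch (illustrative only; the paths below are hypothetical and this
# module is normally imported as part of the build package, not run directly):
#
#   deps = sass_dependencies('assets/scss/main.scss')   # main.scss plus everything it @imports
#   sass_compile('assets/scss/main.scss', 'build/css/main.css', release=True)
#
# sass_dependencies() is what a build tool would feed into its change detection,
# while sass_compile() shells out to node-sass and deletes any partial output on failure.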