ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 7dfb4600aaabd0a7ec113ba9076b5612fb5f5d4e | # SPDX-FileCopyrightText: 2017 Carter Nelson for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_onewire.device`
====================================================
Provides access to a single device on the 1-Wire bus.
* Author(s): Carter Nelson
"""
__version__ = "1.2.5"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_OneWire.git"
_MATCH_ROM = b"\x55"
class OneWireDevice:
"""A class to represent a single device on the 1-Wire bus."""
def __init__(self, bus, address):
self._bus = bus
self._address = address
def __enter__(self):
self._select_rom()
return self
def __exit__(self, *exc):
return False
def readinto(self, buf, *, start=0, end=None):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
self._bus.readinto(buf, start=start, end=end)
if start == 0 and end is None and len(buf) >= 8:
if self._bus.crc8(buf):
raise RuntimeError("CRC error.")
def write(self, buf, *, start=0, end=None):
"""
Write the bytes from ``buf`` to the device.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buffer[start:end]``. This will not cause an allocation like
``buffer[start:end]`` will so it saves memory.
:param bytearray buf: buffer containing the bytes to write
:param int start: Index to start writing from
:param int end: Index to read up to but not include
"""
return self._bus.write(buf, start=start, end=end)
def _select_rom(self):
self._bus.reset()
self.write(_MATCH_ROM)
self.write(self._address.rom)
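# Illustrative usage sketch (editor addition, not part of the upstream driver).
# It assumes a CircuitPython board and the companion OneWireBus class from
# adafruit_onewire.bus; the pin and the command bytes are examples only
# (0x44 / 0xBE are the DS18x20 convert / read-scratchpad commands).
if __name__ == "__main__":
    import board
    from adafruit_onewire.bus import OneWireBus

    ow_bus = OneWireBus(board.D5)
    address = ow_bus.scan()[0]      # first ROM found on the bus
    device = OneWireDevice(ow_bus, address)
    with device as dev:             # __enter__ sends MATCH ROM plus the address
        dev.write(b"\x44")          # start a temperature conversion
    with device as dev:
        dev.write(b"\xbe")          # read the scratchpad
        buf = bytearray(9)
        dev.readinto(buf)           # CRC of the 9-byte response is verified
    print([hex(b) for b in buf])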
|
py | 7dfb4760f21eb6d0ec3db9306d3b4150b91e1be1 | from django.conf import settings
from importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.contrib import admin
def autoload(submodules):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
for submodule in submodules:
try:
import_module("{}.{}".format(app, submodule))
except:
if module_has_submodule(mod, submodule):
raise
def run():
autoload(["receivers"])
admin.autodiscover()
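# Illustrative note (editor addition): run() is typically invoked once at
# startup, for example from the project's root urls.py, so that each installed
# app's optional "receivers" submodule is imported for its signal-registration
# side effects before admin.autodiscover() runs. A hypothetical receivers
# module that autoload(["receivers"]) would pick up might look like:
#
#   # myapp/receivers.py (hypothetical)
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#   from .models import Order
#
#   @receiver(post_save, sender=Order)
#   def on_order_saved(sender, instance, created, **kwargs):
#       ...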
|
py | 7dfb47ee819e035b2e2b0e8b8122dcd065635766 | from __future__ import print_function
import io
import os
import platform
import socket
import sys
from eel import chrome
from PyInstaller import __version__ as pyinstaller_version
class ForwardToFunctionStream(io.TextIOBase):
def __init__(self, output_function=print):
self.output_function = output_function
def write(self, string):
self.output_function(string)
return len(string)
def can_use_chrome():
""" Identify if Chrome is available for Eel to use """
chrome_instance_path = chrome.find_path()
return chrome_instance_path is not None and os.path.exists(chrome_instance_path)
def open_output_folder(folder):
""" Open a folder in the local file explorer """
folder_directory = os.path.abspath(folder)
if platform.system() == 'Windows':
os.startfile(folder_directory, 'explore')
elif platform.system() == 'Linux':
os.system('xdg-open "' + folder_directory + '"')
elif platform.system() == 'Darwin':
os.system('open "' + folder_directory + '"')
else:
return False
return True
def get_warnings():
warnings = []
# Make sure PyInstaller 3.4 or above is being used with Python 3.7
try:
if sys.version_info >= (3, 7) and float(pyinstaller_version) < 3.4:
message = 'You will need PyInstaller 3.4 or above to use this tool with Python 3.7.'
message += '\nYou are currently using PyInstaller {pyinstaller_version}.'.format(pyinstaller_version=pyinstaller_version)
message += '\nPlease upgrade PyInstaller: python -m pip install pyinstaller --upgrade'
warnings.append({
'message': message,
'link': None
})
except ValueError:
pass # Dev branches will have pyinstaller_version as a string in the form X.Y.devZ+HASH. Ignore it if this is the case.
# Make sure PyInstaller 4.0 or above is being used with Python 3.8 and 3.9
try:
if sys.version_info.major == 3 and (sys.version_info.minor == 8 or sys.version_info.minor == 9) and float(pyinstaller_version) < 4.1:
message = 'PyInstaller 4.0 and below does not officially support Python 3.8 and 3.9.'
message += '\nYou are currently using PyInstaller {pyinstaller_version}.'.format(pyinstaller_version=pyinstaller_version)
message += '\nIt is highly recommended to update your version of PyInstaller using: python -m pip install pyinstaller --upgrade'
warnings.append({
'message': message,
'link': None
})
except ValueError:
pass # Dev branches will have pyinstaller_version as a string in the form X.Y.devZ+HASH. Ignore it if this is the case.
# Make sure PyInstaller 4.6 or above is being used with Python 3.10
try:
if sys.version_info.major == 3 and sys.version_info.minor == 10 and float(pyinstaller_version) < 4.6:
message = 'You will need PyInstaller 4.6 or above to use this tool with Python 3.10.'
message += '\nYou are currently using PyInstaller {pyinstaller_version}.'.format(pyinstaller_version=pyinstaller_version)
message += '\nPlease upgrade PyInstaller: python -m pip install pyinstaller --upgrade'
warnings.append({
'message': message,
'link': None
})
except ValueError:
pass # Dev branches will have pyinstaller_version as a string in the form X.Y.devZ+HASH. Ignore it if this is the case.
# Make sure we are not using Python from the Windows Store
if "Packages\PythonSoftwareFoundation.Python." in sys.executable:
message = 'It looks like you may be using Python from the Windows Store, the Python binary you are currently using is at:'
message += '"' + sys.executable + '"'
message += '\n\nPython from the Windows Store is not supported by PyInstaller so you may get errors referencing "win32ctypes.pywin32.pywintypes.error: (1920, \'LoadLibraryEx\', \'The file cannot be accessed by the system\'".'
message += '\nTo fix this, use a distribution of Python from python.org.'
warnings.append({
'message': message,
'link': "https://github.com/brentvollebregt/auto-py-to-exe/issues/166"
})
return warnings
def get_port():
""" Get an available port by starting a new server, stopping and and returning the port """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
return port
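# Illustrative usage sketch (editor addition). ForwardToFunctionStream is a
# file-like object, so it can be handed to contextlib.redirect_stdout to route
# print() output into an arbitrary callback (the ui_log callback below is a
# hypothetical stand-in for whatever consumes the output).
if __name__ == "__main__":
    import contextlib

    def ui_log(text):
        sys.stderr.write("[captured] " + text)

    stream = ForwardToFunctionStream(output_function=ui_log)
    with contextlib.redirect_stdout(stream):
        print("hello from the build process")

    print("chrome available:", can_use_chrome())
    print("free port:", get_port())
    print("warnings:", [w['message'].splitlines()[0] for w in get_warnings()])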
|
py | 7dfb47f8246a9e876c9d34997e31f7ea9b57776b | from LightPipes import *
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import patches
wavelength = 1500*nm
size = 25*mm
N = 500
F=Begin(size,wavelength,N)
F=CircAperture(F, 2*mm, x_shift=-6*mm, y_shift=-2*mm)
F=Fresnel(F, 0.4*m)
Xc,Yc, NXc, NYc =Centroid(F)
sx, sy = D4sigma(F)
I0=Intensity(F)
# Axes ...
fig, main_ax = plt.subplots(figsize=(5, 5))
divider = make_axes_locatable(main_ax)
top_ax = divider.append_axes("top", 1.05, pad=0.1, sharex=main_ax)
right_ax = divider.append_axes("right", 1.05, pad=0.1, sharey=main_ax)
# Make some labels invisible
top_ax.xaxis.set_tick_params(labelbottom=False)
right_ax.yaxis.set_tick_params(labelleft=False)
# Labels ...
main_ax.set_xlabel('X [mm]')
main_ax.set_ylabel('Y [mm]')
top_ax.set_ylabel('Intensity [a.u.]')
right_ax.set_xlabel('Intensity [a.u.]')
#plot ...
main_ax.pcolormesh(F.xvalues/mm, F.yvalues/mm, I0)
main_ax.axvline(Xc/mm, color='r')
main_ax.axhline(Yc/mm, color='g')
main_ax.add_patch(patches.Ellipse((Xc/mm, Yc/mm), sx/mm, sy/mm, fill=False, lw=1,color='w', ls='--'))
right_ax.plot(I0[:,NXc],F.yvalues/mm, 'r-', lw=1)
top_ax.plot(F.xvalues/mm,I0[NYc,:], 'g-', lw=1)
plt.show()
|
py | 7dfb482adc314196b795d00b14bef2d5690dda4e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorlayerx as tlx
import tensorlayerx
from tests.utils import CustomTestCase
class Layer_RNN_Test(CustomTestCase):
@classmethod
def setUpClass(self):
self.rnncell_input = tlx.nn.Input([4, 16], name='input')
self.rnncell_prev_h = tlx.nn.Input([4,32])
self.rnncell = tlx.nn.RNNCell(input_size=16, hidden_size=32, bias=True, act='tanh', name='rnncell_1')
self.rnncell_out, _ = self.rnncell(self.rnncell_input, self.rnncell_prev_h)
self.rnn_input = tlx.nn.Input([23, 32, 16], name='input1')
self.rnn_prev_h = tlx.nn.Input([4, 32, 32])
self.rnn = tlx.nn.RNN(
input_size=16, hidden_size=32, bias=True, num_layers=2, bidirectional = True, act='tanh',
batch_first=False, dropout=0, name='rnn_1')
self.rnn_out, _ = self.rnn(self.rnn_input, self.rnn_prev_h)
self.lstmcell_input = tlx.nn.Input([4, 16], name='input')
self.lstmcell_prev_h = tlx.nn.Input([4, 32])
self.lstmcell_prev_c = tlx.nn.Input([4, 32])
self.lstmcell = tlx.nn.LSTMCell(input_size=16, hidden_size=32, bias=True, name='lstmcell_1')
self.lstmcell_out, (h, c) = self.lstmcell(self.lstmcell_input, (self.lstmcell_prev_h, self.lstmcell_prev_c))
self.lstm_input = tlx.nn.Input([23, 32, 16], name='input')
self.lstm_prev_h = tlx.nn.Input([4, 32, 32])
self.lstm_prev_c = tlx.nn.Input([4, 32, 32])
self.lstm = tlx.nn.LSTM(input_size=16, hidden_size=32, bias=True, num_layers=2, bidirectional=True,
batch_first=False, dropout=0, name='lstm_1')
self.lstm_out, (h, c) = self.lstm(self.lstm_input, (self.lstm_prev_h, self.lstm_prev_c))
self.grucell_input = tlx.nn.Input([4, 16], name='input')
self.grucell_prev_h = tlx.nn.Input([4, 32])
self.grucell = tlx.nn.GRUCell(input_size=16, hidden_size=32, bias=True, name='grucell_1')
self.grucell_out, h = self.grucell(self.grucell_input, self.grucell_prev_h)
self.gru_input = tlx.nn.Input([23, 32, 16], name='input')
self.gru_prev_h = tlx.nn.Input([4, 32, 32])
self.gru = tlx.nn.GRU(input_size=16, hidden_size=32, bias=True, num_layers=2, bidirectional=True,
batch_first=False, dropout=0, name='GRU_1')
self.gru_out, h = self.gru(self.gru_input, self.gru_prev_h)
@classmethod
def tearDownClass(self):
pass
def test_layer_n1(self):
self.assertEqual(tlx.get_tensor_shape(self.rnncell_out), [4, 32])
def test_layer_n2(self):
self.assertEqual(tlx.get_tensor_shape(self.rnn_out), [23, 32, 64])
def test_layer_n3(self):
self.assertEqual(tlx.get_tensor_shape(self.lstmcell_out), [4, 32])
def test_layer_n4(self):
self.assertEqual(tlx.get_tensor_shape(self.lstm_out), [23, 32, 64])
def test_layer_n5(self):
self.assertEqual(tlx.get_tensor_shape(self.grucell_out), [4, 32])
def test_layer_n6(self):
self.assertEqual(tlx.get_tensor_shape(self.gru_out), [23, 32, 64])
class Layer_Transformer_Test(CustomTestCase):
@classmethod
def setUpClass(self):
self.multiheadattention_q = tlx.nn.Input(shape=(4,2,128),init=tlx.initializers.ones())
self.multiheadattention_attn_mask = tlx.convert_to_tensor(np.zeros((4,4)),dtype='bool')
self.multiheadattention = tlx.nn.MultiheadAttention(embed_dim=128, num_heads=4)
self.multiheadattention_out = self.multiheadattention(
self.multiheadattention_q, attn_mask=self.multiheadattention_attn_mask
)
self.transformerencoderLayer_q = tlx.nn.Input(shape=(4, 2, 128), init=tlx.initializers.ones())
self.transformerencoderLayer_attn_mask = tlx.convert_to_tensor(np.zeros((4, 4)), dtype='bool')
self.encoder = tlx.nn.TransformerEncoderLayer(128, 2, 256)
self.encoderlayer_out = self.encoder(self.transformerencoderLayer_q, src_mask=self.transformerencoderLayer_attn_mask)
self.transformerdecoderLayer_q = tlx.nn.Input(shape=(4, 2, 128), init=tlx.initializers.ones())
self.encoder_layer = tlx.nn.TransformerDecoderLayer(128, 2, 256)
self.decoderlayer_out = self.encoder_layer(self.transformerdecoderLayer_q, self.transformerdecoderLayer_q)
self.transformerencoder_q = tlx.nn.Input(shape=(4, 2, 128), init=tlx.initializers.ones())
self.transformerencoder_attn_mask = tlx.convert_to_tensor(np.zeros((4, 4)), dtype='bool')
self.encoder_layer = tlx.nn.TransformerEncoderLayer(128, 2, 256)
self.encoder = tlx.nn.TransformerEncoder(self.encoder_layer, num_layers=3)
self.encoder_out = self.encoder(self.transformerencoder_q, mask=self.transformerencoder_attn_mask)
self.transformeradecoder_q = tlx.nn.Input(shape=(4, 2, 128), init=tlx.initializers.ones())
self.decoder_layer = tlx.nn.TransformerDecoderLayer(128, 2, 256)
self.decoder = tlx.nn.TransformerDecoder(self.decoder_layer, num_layers=3)
self.decoder_out = self.decoder(self.transformeradecoder_q, self.transformeradecoder_q)
self.src = tlx.nn.Input(shape=(4, 2, 128), init=tlx.initializers.ones())
self.tgt = tlx.nn.Input(shape=(4, 2, 128), init=tlx.initializers.ones())
self.layer = tlx.nn.Transformer(d_model=128, nhead=4)
self.out = self.layer(self.src, self.tgt)
@classmethod
def tearDownClass(self):
pass
def test_layer_n7(self):
self.assertEqual(tlx.get_tensor_shape(self.multiheadattention_out[0]), [4, 2, 128])
def test_layer_n8(self):
self.assertEqual(tlx.get_tensor_shape(self.encoderlayer_out), [4, 2, 128])
def test_layer_n9(self):
self.assertEqual(tlx.get_tensor_shape(self.decoderlayer_out), [4, 2, 128])
def test_layer_n10(self):
self.assertEqual(tlx.get_tensor_shape(self.encoder_out), [4, 2, 128])
def test_layer_n11(self):
self.assertEqual(tlx.get_tensor_shape(self.decoder_out), [4, 2, 128])
def test_layer_n12(self):
self.assertEqual(tlx.get_tensor_shape(self.out), [4, 2, 128])
if __name__ == '__main__':
unittest.main()
|
py | 7dfb484349ebb73ec10ed61296b7a774bbd68e9e | ##########################################################################
#
# Copyright (c) 2022, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferArnold
Gaffer.Metadata.registerNode(
GafferArnold.ArnoldImager,
"description",
"""
Assigns an imager. This is stored as an `ai:imager` option in Gaffer's
globals, and applied to all render outputs.
> Tip : Use the `layer_selection` parameter on each imager to control
> which AOVs the imager applies to.
""",
plugs = {
"imager" : [
"description",
"""
The imager to be assigned. The output of an ArnoldShader node
holding an imager should be connected here. Multiple imagers may be
assigned at once by chaining them together via their `input`
parameters, and then assigning the final imager via the ArnoldImager
node.
""",
"noduleLayout:section", "left",
"nodule:type", "GafferUI::StandardNodule",
],
"mode" : [
"description",
"""
The mode used to combine the `imager` input with any imagers that
already exist in the globals.
- Replace : Removes all pre-existing imagers, and replaces them with
the new ones.
- InsertFirst : Inserts the new imagers so that they will be run before
any pre-existing imagers.
- InsertLast : Inserts the new imagers so that they will be run after
any pre-existing imagers.
""",
"preset:Replace", GafferArnold.ArnoldImager.Mode.Replace,
"preset:InsertFirst", GafferArnold.ArnoldImager.Mode.InsertFirst,
"preset:InsertLast", GafferArnold.ArnoldImager.Mode.InsertLast,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
}
)
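# Illustrative sketch (editor addition; the plug access pattern below is
# assumed from standard Gaffer scripting conventions rather than taken from
# this file). With the metadata registered above, the "mode" plug is shown as
# a presets dropdown in the UI, and the same presets can be set from code:
#
#   imagerNode = GafferArnold.ArnoldImager()
#   imagerNode["mode"].setValue(GafferArnold.ArnoldImager.Mode.InsertLast)
#   # imagerNode["imager"] would then be connected to the output of an
#   # ArnoldShader node that holds the desired Arnold imager.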
|
py | 7dfb48f1ade30cd75fdb4d968f910e57f8c3774b | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import numpy as np
import cunumeric as num
def test():
anp = np.random.randn(4, 5)
b = random.randint(1, 13)
a = num.array(anp)
assert np.array_equal(a / b, anp / b)
assert np.array_equal(b / a, b / anp)
return
if __name__ == "__main__":
test()
|
py | 7dfb491bcb3fe23c969e84434c084c203a702ef2 | import json
import datetime
import os
from firebase import firebase
from firebase_profile import FIREBASE_URL
class Pothole:
def __init__(self, lat: float, lng: float, depth: str=None, length: str=None, image: str=None):
self.lat = lat
self.lng = lng
self.depth = depth
self.length = length
self.image = image
def to_dict(self):
"""
Format the pothole to a dictionary so that it is easier to be added into the database.
"""
obj = {}
obj['latitude'] = self.lat
obj['longitude'] = self.lng
if self.depth:
obj['depth'] = self.depth
if self.length:
obj['length'] = self.length
if self.image:
obj['image'] = self.image
return obj
class MyFirebase:
def __init__(self, url, auth=None):
self.url = url
self.fb = firebase.FirebaseApplication(url, auth)
self.url_potholes = self.url + '/potholes'
def rename_node(self, parent_url, old_node, new_node):
"""
Rename the node at the given path
Parameters
----------
parent_url : str
The url of the parent folder, which should end with '/'
old_node : str
The old name of the node
new_node : str
The new name of the node
Returns
-------
        bool
            True if the rename succeeded, False otherwise
"""
try:
content = self.fb.get(parent_url, old_node)
if not content:
print("Error: rename_node: old node %s does NOT exist yet" % (parent_url + old_node))
return False
content_new = self.fb.get(parent_url, new_node)
if content_new:
print("Error: rename_node: new node %s already exists" % (parent_url + new_node))
return False
self.fb.put(parent_url, new_node, content)
self.fb.delete(parent_url, old_node)
return True
except:
# TODO: revert partial changes if needed
print("Error: rename_node: url %s, old name %s, new name %s" % (parent_url, old_node, new_node))
return False
def remove_node(self, parent_url, *nodes):
"""
Remove the given nodes from the specified url
Parameter
---------
parent_url : str
The url of the parent folder, which should end with '/'
nodes : [str]
The list of nodes to be removed
"""
for node in nodes:
# TODO: add some checking?
self.fb.delete(parent_url, node)
def save_database_to_file(self, output_file):
"""
Save the database into file
"""
print("Save database to file %s" % output_file)
database = self.fb.get(self.url, None)
# print(database)
with open(output_file, 'w') as fp:
json.dump(database, fp)
def backup_database(self):
"""
Backup the database
"""
current_time = datetime.datetime.utcnow()
backup_folder = self.get_backup_folder()
backup_file = os.path.join(backup_folder, 'UTC - ' + str(current_time) + '.json')
self.save_database_to_file(backup_file)
def get_latest_backup(self):
"""
Get the latest backup file
"""
backup_folder = self.get_backup_folder()
files = os.listdir(backup_folder)
files = [f for f in files if os.path.isfile(os.path.join(backup_folder, f)) and not f.startswith('.')]
files = sorted(files)
if files:
latest = files[-1]
return os.path.join(backup_folder, latest)
return None
def recover_from_latest_backup(self):
"""
Load the latest backup into the database
"""
latest_backup = self.get_latest_backup()
if not latest_backup:
print("No backup available.")
return
with open(latest_backup, 'r') as fp:
database = json.load(fp)
# print(database)
print("recover from %s" % latest_backup)
for top_key, values in database.items():
self.fb.put(self.url, top_key, values)
def get_backup_folder(self):
"""
Get the backup folder
"""
backup_folder = os.path.join(os.getcwd(), 'backup')
if not os.path.isdir(backup_folder):
os.mkdir(backup_folder)
return backup_folder
def pothole_exist(self, pothole: Pothole):
"""
Check if the given pothole is already existing in the database.
Returns
-------
True if pothole already exists.
"""
        potholes = self.fb.get(self.url_potholes, None)
        if not potholes:
            return False
        for _, pot in potholes.items():
if pot['latitude'] == pothole.lat and pot['longitude'] == pothole.lng:
return True
return False
def post_pothole(self, pothole: Pothole):
"""
Add given pothole into the database via POST
"""
if self.pothole_exist(pothole):
print("pothole exists")
return
self.fb.post(self.url_potholes, pothole.to_dict())
def init_db(mfb: MyFirebase):
"""
Add a couple of potholes into the database
"""
# mfb.backup_database()
potholes = [(42.999938,-78.797406), (42.980365,-78.807876), (42.999499,-78.794131)]
for pot in potholes:
pothole = Pothole(pot[0], pot[1], '40cm', '3cm', '<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMcAAACRCAAAAABSufPwAAAAAXNSR0IArs4c6QAADCRJREFUeNrtnXt0FNUdx/2/PWqPEEh25t77m2xCQghIQCDyjEpFUQQLxKoYH4AWLVoFqqAC1oOiImC10CNq1VZr8YkCHosopweQGhBqDMVAAIsJj7wUdvfemd29PXd2NzubnZmdXbKJ4cz9I4csm8x87vf3ur97Z3MeOzfGeS6Hy+FyuBwuh8uRIQdltGsHo9nRg/KuHYEs2ZX/ubLRXTeGPsuzpIdvSZ9hI4Z3zRgxpM8jWeN45JI99Ye6ZhzZRB7NGsfDI0+Ggl0z+FdKNjkauypmBXdnmaNrhsvhcrgcLofL4XK4HC6Hy+FyuBwuh8vhcrgcLofL4XL0OI6z3IX4qXBQ9Ww2IVSaAUdky6ez9Ti+fOGiTMcLp9LWg7L4ThJlmW0qmXLsufL5tX/ObKycekBNk4MyqtKDq5a/svmLutMay2x3zJSjem7GO2Jt82rV9PVoWjmWYKWg+PKZ7we0ztOj+p4fMrXVE/fvT5ODsoDvXowJABCMpaV+NRNBup+DBfibgBUFFDGg4HWV9kw9eE2ZrEQHKGRMbY/UgzJ+hxRTQ1EA8t9VeyQH3zeEKPEBZLXWI/1DXZsPRg78dE/koKxpJkng8L7XM+2qehRRDCBQurNnxt0NxUb3UGDobjWDSqvb7erMYgxGuyKTjqkZFFndy0EZbbhagkQ9Hj2qpS9It3JQFuCrEzEUQLmVTTzQk/SgjGpqEVYSOUCRpzZq6Va93chBmarSJyQlIerqIHlVJ9NdiGTAIaZK1YKa6XWcclDGgoGd624vgg5yEBkTQPe0pukj6XOIZY//vx+//VGzmfjOOMRka/+84WKMSAc1cP+nqzAQedahcFqmlZEep+YNK8Aw8YBJn8URh1iNB18eKNYcSqJZ4cKXefUAoiie8TWhdEAy4fAvxwAAaLJPzVQPtfVJL0pk0PW48HGuaX+QARR0zVdaGj6SiZ83TswTNwD5r/OM9KAs2DoHdYhTkdH7zn8F+PGrJDFNk74MOveRTDiO/dITiZB3tWVoV6cfxpCkhi6IVDLvG76jDAGAZ8hXPKscrVW5+l2QsbWZcFAWeKuYmFBEoq5UsZ2vEJiKfE2D46iVNofw0fs8+k1A0cYk+03JIZz32GWyGUYURh68YteQyEQ9SbPkH+IutOX5kWgJefNPU5boi6n1oDQ4p68lhQIKYGlIsf4GcvEnwazoISoJ7fHecqRCBVSwmwcTJXHAwTdKoNiBKBCdJ5Cua3RYxKerh+p7jHhH5kdLbTT4txv8CYqktivVf5Wpc0ReA+M3CvR6gTvLImlxUMbOLCKosmlm7Jogy/MCxuI0NUd4fRExFwIXAolOUOwrKq3WOl8PqvoW5CJvXeMcaJ810rfyBy0dPU7MNlcD5IqNSwZ5ZIESf4M85YTWyXpQxsK/6wu567eUy2Bwy9zb/fGaMRUHZTtHmMdcNKEm7Nu5clphrgcb6Er2hDrbrih/uwBIya/zERgNGQrWxS+VgoMy31oFTPVAs5sZ1bi2/YVZhhU7GlkX7HQ9DlcQUAgS9240cTLhsHM9Wm/NA/O0Mf6IymiAcf6/SVL7r85/I9TZ/kEDy2TddIkkj5oc7w4AKH8NxEBSchwuReYc0og6VcSm8L6b4naH+zmTIw0Oyo5H6kMyZPa6o83XeeK3kzujxaEeVN31M4u0IVXUqyygnXllOIo7EPFuCXd23KX0Cb2682wJhbWTUw3mgZV6p/7BV/3CggPdcYpR/v2DOF7Pg3LRYoelYhp6UPUvXl2P4lf97MQUo5mf/14omnZTchhiXQeOZUzVtk9CCcnl5//gWcjn/xkoCgbA6IH64IJcw/30nsud+Ifo8xRj85oEhn4ablk+GCWULJhsCdNOr69o053R3Jc39u1NA+LXA884Rxxik0A2T+YKWRTacbOHJGBA/+eyULdTxl/yxsK6d0whxMsHyGkOOrArSvkCC7NSYNr9xVIHqcj4g9lYf9DWW0h7+SMySXzycvZy6kiP+VYrD8AS7vg/0G+9FuhkDtHu2VSS0FEuGthuI7nvOuGgKpspmxbqIgslr3RB+o3PYdPEKQdlNPjNMKNRAJSNQ7FvPE854gg2XotNFk6WyxE8YF+wk+t2Gmob5+lQocYLpbxV0fhozxE6eiW20MPc2Hot0DpTD7EObJ6Uq88cGJYI7deXljjSI3xgTLIenslzJbBa48qLf1SdKOJQD8pO3SXpGEQkqqTL4smcOdHjs8GQzHHjHZJVEFNI4VrVyQPZjjhEj2Qp0h0RzX6jFCXbMxnVFEzNwYIbS5OKdhCBz9KwFDJom5N2nBMOkYbfwfrRCTSxge9JrlgBLqkJO9AjvHcoMTUfxbrtIJWLkzf07Dkoo3yHWHWAgkuO8QDfekGycfTfzJ1w/LuMmPYXbEGmHEnde3ekR+hwuZ6+oM9rPMDU2lGogyEAXNqQ2q4o094pAZtbNucAPL0lZfRNySHy3/fTI3k8b1qbfnbt2G0dfZ2MduIflK4scIKR2DNRQL41ZdBKxSEiblOVvg4EKPxQf5lqTSM9iUYNY/x6l5/a69F4GziiAIyx4QJAlrSoGeoR+7ALFjo0PScaOqraIpNC+dc3SQkGgqo4ZcIdT9rGq/3jiSNLkqa9OTMX4sspgpdl7B9UC6qhMP/x0/E5UY3xulB7/GpZYCzAcf8DnLXs/fLFu2ZNr7PhqLuGOJAD0N0NwYbrDd1TwPkf2DcWrfVQvzv04zfbV91QElMYvB9F3yh+Y9tzCLXndlI846FZ1w8tVZA8ut6GY/cw4gBDvuW4RtneccTQJCXD9gcDNiDWHHwO9M9XIN7eg35buG5tkcQY+mQ4bi9RgBCsb7YROw76ljd1uAJ5fqvYGgxvMZzLAgVPPWJ3lNSC44yq8rlRF2jvXfT/nFOmqarKVPGF88fbG2btb7Tl8D2Vm9rP8YCvI9PFXzWs1UH0IWxOB5if353xyaZtdfvj7bDoEcj3Oa/f8O6Hn9XUfVtbvfWjl2/OT7otez2qJ0BKs8J520IRzbVrPWCIWdLqEE1Pj69L+hUUjZ44MnGBBt55R1aNyfcW9CsfO+bSsqICwCaFlg0HZQcnQaq8gQtnN0QO/FB1BzG6Ey7YY70Vbc5RJE7TQuL6RneC6OuxYdIusPXzmgqSIuLKV2/jsVQRCDxoqORAQeU1lunQggNswontndhzHJoM9qEqr7Kex9yZMlY9wtAlAgVduy9ssaeeLkeqYc9x+Ff2+2nyxG+DxtWG79HEcxt4wiGLvlxXcoi6hNi6ePF6NRCfb0qDhy9Fiaml4qh50Mo2R4NqeBaC+h+yNlgA+bK/+RNvkvI/yYku6qnYavqBXdnlWDTKH/BzHiWhTPt4ELHqX5Gcm/aFO+yhUa1lvJxoe9LQDWET08oux+LieysrZ2wMc
85D4mU1tMK8nyiC4fwGtUPGFiu4J/omFtaAyv7Ow+0Kx95vyrE398JMxwWDDho4HuslQrVUsfSBVTuPt532U19tCTZ3DeW2Ex3PFOiG9WKfpBRF7tnaxjQaEAYWiGpj+nwUpYGMR3SGohy9IxVTr5yLiq6Ydt9ja2aWFoLZoRJP2bJms40nGtoIJEm63krVmu1NbW0+xvxtkV3v7D6vpnNEai+clychIiX1WsT3yFOxWQuZlbM0+HkxSQbHHlQ2ZXrl/X98/vc37lJZ1p8fjOgR6yWAWQ9XAcjrN+eARUeEBrcWE9PghhFCRDzC85IeG7qIw7olAvlFlZ9xqw8/pNrOgdi8wxId8lNq13JYpY1LPmCcJtiy+FFVZXrRq9YaM2EyD8l/LXJGq5s5FIAJm0/6GBMPu+kfWBfi4rTgmbZQOBJ3JbveFi5Y3Mw41xjnNd3MQdCgiXcvfHjN5i92HGtsaGj4bsOzCxcuvHvGM/u52MBajW04iHdNmPPqVxY/tGLdMrl77Ur0dzFCHm9JyfDy8vLy4V4ZIYwJLl3Zoqr+XRXIprb0XjbhyssHIoRkGfXpVj3aG6MEY1mM2GIW8PlXvLFrYW9kW+hLHknCkTDY7RwWwQwUDwzssINr9WPiHz8FDosVm37Az2mt9xPlSHu4HC6Hy+FyuBwuh8vhcthwLBV9ny4akJO1z28/Vz5P/xz5+wbnzN+bOAeGy+FyuBwuh8vhcmQy/g8nC6N47daFmQAAAABJRU5ErkJggg==" alt="" />')
mfb.post_pothole(pothole)
if __name__ == '__main__':
myfb = MyFirebase(FIREBASE_URL)
init_db(myfb)
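# Maintenance sketch (editor addition): the backup/restore helpers defined on
# MyFirebase are typically driven as below; the ./backup directory layout is
# whatever get_backup_folder() creates.
#
#   myfb.backup_database()                # writes backup/UTC - <time>.json
#   print(myfb.get_latest_backup())       # newest backup file, or None
#   myfb.recover_from_latest_backup()     # replays the newest backup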
|
py | 7dfb4aa59831887bb5dfce8a41823e9fc7263400 | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
import numpy
import time
from .. import hsds_logger as log
def getArraySize(arr):
""" Return size in bytes of numpy array """
nbytes = arr.dtype.itemsize
for n in arr.shape:
nbytes *= n
return nbytes
class Node(object):
def __init__(self, id, data,
mem_size=1024, isdirty=False, prev=None, next=None):
self._id = id
self._data = data
self._mem_size = mem_size
self._isdirty = isdirty
self._prev = prev
self._next = next
self._last_access = time.time()
class LruCache(object):
""" LRU cache for Numpy arrays that are read/written from S3
If name is "ChunkCache", chunk items are assumed by be ndarrays
"""
def __init__(self, mem_target=32*1024*1024,
name="LruCache", expire_time=None):
self._hash = {}
self._lru_head = None
self._lru_tail = None
self._mem_size = 0
self._dirty_size = 0
self._mem_target = mem_target
self._expire_time = expire_time
self._name = name
self._dirty_set = set()
def _delNode(self, key):
# remove from LRU
if key not in self._hash:
raise KeyError(key)
node = self._hash[key]
prev = node._prev
next_node = node._next
if prev is None:
if self._lru_head != node:
raise KeyError("unexpected error")
self._lru_head = next_node
else:
prev._next = next_node
if next_node is None:
if self._lru_tail != node:
raise KeyError("unexpected error")
self._lru_tail = prev
else:
next_node._prev = prev
node._next = node._prev = None
log.debug(f"LRU {self._name} node {node._id} removed {self._name}")
return node
def _moveToFront(self, key):
# move this node to the front of LRU list
if key not in self._hash:
raise KeyError(key)
node = self._hash[key]
if self._lru_head == node:
# already the front
return node
if node._prev is None:
raise KeyError("unexpected error")
prev = node._prev
next_node = node._next
node._prev = None
node._next = self._lru_head
prev._next = next_node
self._lru_head._prev = node
if next_node is not None:
next_node._prev = prev
else:
if self._lru_tail != node:
raise KeyError("unexpected error")
self._lru_tail = prev
self._lru_head = node
return node
def _hasKey(self, key, ignore_expire=False):
""" check if key is present node """
if key not in self._hash:
return False
if ignore_expire:
return True
node = self._hash[key]
now = time.time()
if self._expire_time:
age = now - node._last_access
if age > self._expire_time and not node._isdirty:
msg = f"LRU {self._name} node {key} has been in cache for "
msg += f"{now - node._last_access:.3f} seconds, expiring"
log.debug(msg)
return False
else:
return True
else:
return True
def __delitem__(self, key):
node = self._delNode(key) # remove from LRU
del self._hash[key] # remove from hash
# remove from LRU list
self._mem_size -= node._mem_size
if key in self._dirty_set:
log.warning(f"LRU {self._name} removing dirty node: {key}")
self._dirty_set.remove(key)
self._dirty_size -= node._mem_size
def __len__(self):
""" Number of nodes in the cache """
return len(self._hash)
def __iter__(self):
""" Iterate over node ids """
node = self._lru_head
while node is not None:
yield node._id
node = node._next
def __contains__(self, key):
""" Test if key is in the cache """
return self._hasKey(key)
def __getitem__(self, key):
""" Return numpy array from cache """
# doing a getitem has the side effect of moving this node
# up in the LRU list
if not self._hasKey(key):
raise KeyError(key)
node = self._moveToFront(key)
return node._data
def __setitem__(self, key, data):
log.debug(f"setitem, key: {key}")
if isinstance(data, numpy.ndarray):
# can just compute size for numpy array
mem_size = getArraySize(data)
elif isinstance(data, dict):
# TBD - come up with a way to get the actual data size
# for dict objects
mem_size = 1024
elif isinstance(data, bytes):
mem_size = len(data)
else:
raise TypeError("Unexpected type for LRUCache")
if key in self._hash:
# key is already in the LRU - update mem size, data and
# move to front
node = self._hash[key]
old_size = self._hash[key]._mem_size
            mem_delta = mem_size - old_size
self._mem_size += mem_delta
node._data = data
node._mem_size = mem_size
self._moveToFront(key)
if node._isdirty:
self._dirty_size += mem_delta
node._last_access = time.time()
msg = f"LRU {self._name} updated node: {key} "
msg += f"[was {old_size} bytes now {node._mem_size} bytes]"
log.debug(msg)
else:
node = Node(key, data, mem_size=mem_size)
if self._lru_head is None:
self._lru_head = self._lru_tail = node
else:
# newer items go to the front
next_node = self._lru_head
if next_node._prev is not None:
raise KeyError("unexpected error")
node._next = next_node
next_node._prev = node
self._lru_head = node
self._hash[key] = node
self._mem_size += node._mem_size
msg = f"LRU {self._name} adding {node._mem_size} to cache, "
msg += "mem_size is now: {self._mem_size}"
log.debug(msg)
if node._isdirty:
self._dirty_size += node._mem_size
msg = f"LRU {self._name} dirty size is now: {self._dirty_size}"
log.debug(msg)
msg = f"LRU {self._name} added new node: {key} "
msg += f"[{node._mem_size} bytes]"
log.debug(msg)
if self._mem_size > self._mem_target:
# set dirty temporarily so we can't remove this node in reduceCache
msg = f"LRU {self._name} mem_size greater than target "
msg += f"{self._mem_target} reducing cache"
log.debug(msg)
isdirty = node._isdirty
node._isdirty = True
self._reduceCache()
node._isdirty = isdirty
def _reduceCache(self):
# remove nodes from cache (if not dirty) until we are under
# memory mem_target
log.debug(f"LRU {self._name} reduceCache")
node = self._lru_tail # start from the back
while node is not None:
next_node = node._prev
if not node._isdirty:
log.debug(f"LRU {self._name} removing node: {node._id}")
self.__delitem__(node._id)
if self._mem_size <= self._mem_target:
msg = f"LRU {self._name} mem_size reduced below target"
log.debug(msg)
break
else:
pass # can't remove dirty nodes
node = next_node
if self._mem_size > self._mem_target:
msg = f"LRU {self._name} mem size of {self._mem_size} "
msg += f"not reduced below target {self._mem_target}"
log.debug(msg)
# done reduceCache
def clearCache(self):
# remove all nodes from cache
log.debug(f"LRU {self._name} clearCache")
node = self._lru_tail # start from the back
while node is not None:
next_node = node._prev
if node._isdirty:
msg = f"LRU {self._name} found dirty node during clear: "
msg += f"{node._id}"
log.error(msg)
raise ValueError("Unable to clear cache")
log.debug(f"LRU {self._name} removing node: {node._id}")
self.__delitem__(node._id)
node = next_node
# done clearCache
def consistencyCheck(self):
""" verify that the data structure is self-consistent """
id_list = []
dirty_count = 0
mem_usage = 0
dirty_usage = 0
# walk the LRU list
node = self._lru_head
node_type = None
while node is not None:
id_list.append(node._id)
if node._id not in self._hash:
raise ValueError(f"node: {node._id} not found in hash")
if node._isdirty:
dirty_count += 1
if node._id not in self._dirty_set:
msg = f"expected to find id: {node._id} in dirty set"
raise ValueError(msg)
dirty_usage += node._mem_size
mem_usage += node._mem_size
if node_type is None:
node_type = type(node._data)
else:
if not isinstance(node._data, node_type):
raise TypeError("Unexpected datatype")
node = node._next
# finish forward iteration
if len(id_list) != len(self._hash):
msg = "unexpected number of elements in forward LRU list"
            raise ValueError(msg)
if dirty_count != len(self._dirty_set):
raise ValueError("unexpected number of dirty nodes")
if mem_usage != self._mem_size:
raise ValueError("unexpected memory size")
if dirty_usage != self._dirty_size:
raise ValueError("unexpected dirty size")
# go back through list
node = self._lru_tail
pos = len(id_list)
reverse_count = 0
while node is not None:
reverse_count += 1
if pos == 0:
raise ValueError(f"unexpected node: {node._id}")
if node._id != id_list[pos - 1]:
msg = f"expected node: {id_list[pos-1]} but found: {node._id}"
raise ValueError(msg)
pos -= 1
node = node._prev
if reverse_count != len(id_list):
msg = "elements in reverse list do not equal forward list"
raise ValueError(msg)
# done - consistencyCheck
def setDirty(self, key):
""" setting dirty flag has the side effect of moving this node
up in the LRU list """
log.debug(f"LRU {self._name} set dirty node id: {key}")
node = self._moveToFront(key)
if not node._isdirty:
self._dirty_size += node._mem_size
node._isdirty = True
self._dirty_set.add(key)
def clearDirty(self, key):
""" clear the dirty flag """
# clearing dirty flag has the side effect of moving this node
# up in the LRU list
# also, may trigger a memory cleanup
log.debug(f"LRU {self._name} clear dirty node: {key}")
node = self._moveToFront(key)
if node._isdirty:
self._dirty_size -= node._mem_size
node._isdirty = False
if key in self._dirty_set:
self._dirty_set.remove(key)
if self._mem_size > self._mem_target:
# maybe we can free up some memory now
self._reduceCache()
def isDirty(self, key):
""" return dirty flag """
# don't adjust LRU position
return key in self._dirty_set
def dump_lru(self):
""" Return LRU list as a string
(for debugging)
"""
node = self._lru_head
s = "->"
while node:
s += node._id
node = node._next
if node:
s += ","
node = self._lru_tail
s += "\n<-"
while node:
s += node._id
node = node._prev
if node:
s += ","
s += "\n"
return s
@property
def cacheUtilizationPercent(self):
return int((self._mem_size/self._mem_target)*100.0)
@property
def dirtyCount(self):
return len(self._dirty_set)
@property
def memUsed(self):
return self._mem_size
@property
def memTarget(self):
return self._mem_target
@property
def memDirty(self):
return self._dirty_size
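# Small self-test sketch (editor addition). It exercises the public surface of
# LruCache with numpy arrays; the tiny mem_target is chosen only to make the
# accounting easy to read. Note that the module expects to be imported from
# the hsds package (for the relative hsds_logger import), so this block is a
# sketch rather than a standalone script.
if __name__ == "__main__":
    cache = LruCache(mem_target=4 * 1024, name="DemoCache")
    a = numpy.zeros((32,), dtype="float64")     # 256 bytes
    b = numpy.ones((32,), dtype="float64")
    cache["chunk_a"] = a
    cache["chunk_b"] = b
    cache.setDirty("chunk_a")                   # dirty nodes are never evicted
    _ = cache["chunk_b"]                        # a read moves chunk_b to the front
    print(cache.dump_lru())
    print("used:", cache.memUsed, "dirty:", cache.memDirty,
          "util %:", cache.cacheUtilizationPercent)
    cache.clearDirty("chunk_a")                 # may trigger _reduceCache()
    cache.consistencyCheck()                    # raises if internal state is off
    cache.clearCache()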
|
py | 7dfb4b401294badbe5dc3249968ac1d37552be7c | import copy
import errno
import json
import logging
import os
from pbr.version import VersionInfo
import requests
import time
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
# Async strategies
ASYNC_CONTINUE = 'continue'
ASYNC_PAUSE = 'pause'
ASYNC_BLOCK = 'block'
class UnconfiguredException(Exception):
pass
class APIException(Exception):
def __init__(self, message, method, url, status_code, text):
self.message = message
self.method = method
self.url = url
self.status_code = status_code
self.text = text
class RequestMalformedException(APIException):
pass
class UnauthorizedException(APIException):
pass
class ResourceCannotBeDeletedException(APIException):
pass
class ResourceNotFoundException(APIException):
pass
class DependenciesNotReadyException(APIException):
pass
class ResourceInUseException(APIException):
pass
class InternalServerError(APIException):
pass
class InsufficientResourcesException(APIException):
pass
class UnknownAsyncStrategy(APIException):
pass
STATUS_CODES_TO_ERRORS = {
400: RequestMalformedException,
401: UnauthorizedException,
403: ResourceCannotBeDeletedException,
404: ResourceNotFoundException,
406: DependenciesNotReadyException,
409: ResourceInUseException,
500: InternalServerError,
507: InsufficientResourcesException,
}
def _calculate_async_deadline(strategy):
if strategy == ASYNC_CONTINUE:
return -1
if strategy == ASYNC_PAUSE:
return 60
if strategy == ASYNC_BLOCK:
return 3600
raise UnknownAsyncStrategy('Async strategy %s is unknown' % strategy)
class Client(object):
def __init__(self, base_url=None, verbose=False,
namespace=None, key=None, sync_request_timeout=300,
suppress_configuration_lookup=False, logger=None,
async_strategy=ASYNC_BLOCK):
global LOG
if verbose:
LOG.setLevel(logging.DEBUG)
if logger:
LOG = logger
self.sync_request_timeout = sync_request_timeout
if not suppress_configuration_lookup:
# Where do we find authentication details? First off, we try command line
# flags; then environment variables (thanks for doing this for free click);
# ~/.shakenfist (which is a JSON file); and finally /etc/sf/shakenfist.json.
if not base_url:
user_conf = os.path.expanduser('~/.shakenfist')
if os.path.exists(user_conf):
with open(user_conf) as f:
d = json.loads(f.read())
if not namespace:
namespace = d['namespace']
if not key:
key = d['key']
if not base_url:
base_url = d['apiurl']
if not base_url:
try:
if os.path.exists('/etc/sf/shakenfist.json'):
with open('/etc/sf/shakenfist.json') as f:
d = json.loads(f.read())
if not namespace:
namespace = d['namespace']
if not key:
key = d['key']
if not base_url:
base_url = d['apiurl']
except IOError as e:
if e.errno != errno.EACCES:
raise
if not base_url:
raise UnconfiguredException(
'You have not specified the server to communicate with')
self.base_url = base_url
self.namespace = namespace
self.key = key
self.async_strategy = async_strategy
LOG.debug('Client configured with apiurl of %s for namespace %s '
'and async strategy %s'
% (self.base_url, self.namespace, self.async_strategy))
self.cached_auth = None
def _actual_request_url(self, method, url, data=None, data_is_binary=False,
allow_redirects=True):
url = self.base_url + url
h = {'Authorization': self.cached_auth,
'User-Agent': get_user_agent()}
if data:
if data_is_binary:
h['Content-Type'] = 'application/octet-stream'
else:
h['Content-Type'] = 'application/json'
data = json.dumps(data, indent=4, sort_keys=True)
start_time = time.time()
r = requests.request(method, url, data=data, headers=h,
allow_redirects=allow_redirects)
end_time = time.time()
LOG.debug('-------------------------------------------------------')
LOG.debug('API client requested: %s %s' % (method, url))
if data:
if data_is_binary:
LOG.debug('Data: ...binary omitted...')
else:
LOG.debug('Data:\n %s' % '\n '.join(data.split('\n')))
for h in r.history:
LOG.debug('URL request history: %s --> %s %s'
% (h.url, h.status_code, h.headers.get('Location')))
LOG.debug('API client response: code = %s (took %.02f seconds)'
% (r.status_code, (end_time - start_time)))
if r.text:
if data_is_binary:
LOG.debug('Data: ...binary omitted...')
else:
try:
LOG.debug('Data:\n %s'
% ('\n '.join(json.dumps(json.loads(r.text),
indent=4,
sort_keys=True).split('\n'))))
except Exception:
LOG.debug('Text:\n %s'
% ('\n '.join(r.text.split('\n'))))
LOG.debug('-------------------------------------------------------')
if r.status_code in STATUS_CODES_TO_ERRORS:
raise STATUS_CODES_TO_ERRORS[r.status_code](
'API request failed', method, url, r.status_code, r.text)
acceptable = [200]
if not allow_redirects:
acceptable.append(301)
if r.status_code not in acceptable:
raise APIException(
'API request failed', method, url, r.status_code, r.text)
return r
def _authenticate(self):
LOG.debug('Authentication request made, contents not logged')
auth_url = self.base_url + '/auth'
r = requests.request('POST', auth_url,
data=json.dumps(
{'namespace': self.namespace,
'key': self.key}),
headers={'Content-Type': 'application/json',
'User-Agent': get_user_agent()})
if r.status_code != 200:
raise UnauthorizedException('API unauthorized', 'POST', auth_url,
r.status_code, r.text)
return 'Bearer %s' % r.json()['access_token']
def _request_url(self, method, url, data=None, data_is_binary=False):
# NOTE(mikal): if we are not authenticated, probe the base_url looking
# for redirections. If we are redirected, rewrite our base_url to the
# redirection target.
if not self.cached_auth:
probe = self._actual_request_url('GET', '', allow_redirects=False)
if probe.status_code == 301:
LOG.debug('API server redirects to %s'
% probe.headers['Location'])
self.base_url = probe.headers['Location']
self.cached_auth = self._authenticate()
deadline = time.time() + _calculate_async_deadline(self.async_strategy)
while True:
try:
try:
return self._actual_request_url(
method, url, data=data, data_is_binary=data_is_binary)
except UnauthorizedException:
self.cached_auth = self._authenticate()
return self._actual_request_url(
method, url, data=data, data_is_binary=data_is_binary)
except DependenciesNotReadyException as e:
# The API server will return a 406 exception when we have
# specified an operation which depends on a resource and
# that resource is not in the created state.
if time.time() > deadline:
                    LOG.debug('Deadline exceeded waiting for dependencies')
raise e
LOG.debug('Dependencies not ready, retrying')
time.sleep(1)
def get_instances(self, all=False):
r = self._request_url('GET', '/instances', data={'all': all})
return r.json()
def delete_all_instances(self, namespace):
r = self._request_url('DELETE', '/instances',
data={'confirm': True,
'namespace': namespace})
deleted = r.json()
waiting_for = set(deleted)
deadline = time.time() + _calculate_async_deadline(self.async_strategy)
while waiting_for:
            LOG.debug('Waiting for instances to be deleted: %s'
% ', '.join(waiting_for))
if time.time() > deadline:
LOG.debug('Deadline exceeded waiting for instances to delete')
break
time.sleep(1)
for uuid in copy.copy(waiting_for):
inst = self.get_instance(uuid)
if not inst or inst['state'] == 'deleted':
LOG.debug('Instance %s is now deleted' % uuid)
waiting_for.remove(uuid)
return deleted
def get_instance(self, instance_uuid):
r = self._request_url('GET', '/instances/' + instance_uuid)
return r.json()
def get_instance_interfaces(self, instance_uuid):
r = self._request_url('GET', '/instances/' + instance_uuid +
'/interfaces')
return r.json()
def get_instance_metadata(self, instance_uuid):
r = self._request_url('GET', '/instances/' + instance_uuid +
'/metadata')
return r.json()
def set_instance_metadata_item(self, instance_uuid, key, value):
r = self._request_url('PUT', '/instances/' + instance_uuid +
'/metadata/' + key, data={'value': value})
return r.json()
def delete_instance_metadata_item(self, instance_uuid, key):
r = self._request_url('DELETE', '/instances/' + instance_uuid +
'/metadata/' + key)
return r.json()
def create_instance(self, name, cpus, memory, network, disk, sshkey, userdata,
namespace=None, force_placement=None, video=None, uefi=False):
body = {
'name': name,
'cpus': cpus,
'memory': memory,
'network': network,
'ssh_key': sshkey,
'user_data': userdata,
'namespace': namespace,
'video': video,
'uefi': uefi
}
if force_placement:
body['placed_on'] = force_placement
# Ensure size is always an int if specified
clean_disks = []
for d in disk:
if 'size' in d and d['size']:
d['size'] = int(d['size'])
clean_disks.append(d)
body['disk'] = clean_disks
r = self._request_url('POST', '/instances',
data=body)
i = r.json()
deadline = time.time() + _calculate_async_deadline(self.async_strategy)
while True:
if i['state'] not in ['initial', 'creating']:
return i
LOG.debug('Waiting for instance to be created')
if time.time() > deadline:
LOG.debug('Deadline exceeded waiting for instance to be created')
return i
time.sleep(1)
i = self.get_instance(i['uuid'])
def snapshot_instance(self, instance_uuid, all=False, device=None, label_name=None):
r = self._request_url(
'POST', '/instances/' + instance_uuid + '/snapshot',
data={'all': all, 'device': device})
out = r.json()
waiting_for = []
for s in out:
waiting_for.append(out[s]['blob_uuid'])
deadline = time.time() + _calculate_async_deadline(self.async_strategy)
while waiting_for:
LOG.debug('Waiting for snapshots: %s' % ', '.join(waiting_for))
if time.time() > deadline:
LOG.debug('Deadline exceeded waiting for snapshots')
break
time.sleep(1)
snaps = self.get_instance_snapshots(instance_uuid)
for s in snaps:
if s.get('blob_uuid') in waiting_for:
if s.get('state') == 'created':
LOG.debug('Blob %s now present' % s['blob_uuid'])
waiting_for.remove(s['blob_uuid'])
else:
LOG.debug('Blob %s not yet created' % s['blob_uuid'])
if not all and label_name:
# It only makes sense to update a label if we've snapshotted a single
# disk. Otherwise we'd immediately clobber the label with the last
# disk in the snapshot series.
if not device:
device = 'vda'
out['label'] = self.update_label(
label_name, out[device]['blob_uuid'])
return out
def get_instance_snapshots(self, instance_uuid):
r = self._request_url('GET', '/instances/' + instance_uuid +
'/snapshot')
return r.json()
def update_label(self, label_name, blob_uuid):
r = self._request_url(
'POST', '/label/%s' % label_name, data={'blob_uuid': blob_uuid})
return r.json()
def reboot_instance(self, instance_uuid, hard=False):
style = 'soft'
if hard:
style = 'hard'
r = self._request_url('POST', '/instances/' + instance_uuid +
'/reboot' + style)
return r.json()
def power_off_instance(self, instance_uuid):
r = self._request_url('POST', '/instances/' + instance_uuid +
'/poweroff')
return r.json()
def power_on_instance(self, instance_uuid):
r = self._request_url('POST', '/instances/' + instance_uuid +
'/poweron')
return r.json()
def pause_instance(self, instance_uuid):
r = self._request_url('POST', '/instances/' + instance_uuid +
'/pause')
return r.json()
def unpause_instance(self, instance_uuid):
r = self._request_url('POST', '/instances/' + instance_uuid +
'/unpause')
return r.json()
def delete_instance(self, instance_uuid, namespace=None, async_request=False):
# Why pass a namespace when you're passing an exact UUID? The idea here
# is that it provides a consistent interface, but also a safety check
# against overly zealous loops deleting things.
data = None
if namespace:
data = {'namespace': namespace}
self._request_url('DELETE', '/instances/' + instance_uuid, data=data)
if async_request:
return
i = self.get_instance(instance_uuid)
deadline = time.time() + _calculate_async_deadline(self.async_strategy)
while True:
if i['state'] == 'deleted':
return
LOG.debug('Waiting for instance to be deleted')
if time.time() > deadline:
LOG.debug('Deadline exceeded waiting for instance to delete')
return
time.sleep(1)
i = self.get_instance(instance_uuid)
def get_instance_events(self, instance_uuid):
r = self._request_url('GET', '/instances/' + instance_uuid + '/events')
return r.json()
def cache_artifact(self, image_url):
r = self._request_url('POST', '/artifacts', data={'url': image_url})
return r.json()
def upload_artifact(self, name, upload_uuid):
r = self._request_url('POST', '/artifacts/upload/%s' % name,
data={'upload_uuid': upload_uuid})
return r.json()
def get_artifact(self, artifact_uuid):
r = self._request_url('GET', '/artifacts/' + artifact_uuid)
return r.json()
def get_artifacts(self, node=None):
r = self._request_url('GET', '/artifacts', data={'node': node})
return r.json()
def get_artifact_events(self, artifact_uuid):
r = self._request_url('GET', '/artifacts/' + artifact_uuid + '/events')
return r.json()
def get_artifact_versions(self, artifact_uuid):
r = self._request_url(
'GET', '/artifacts/' + artifact_uuid + '/versions')
return r.json()
def delete_artifact(self, artifact_uuid):
r = self._request_url('DELETE', '/artifacts/' + artifact_uuid)
return r.json()
def delete_artifact_version(self, artifact_uuid, version_id):
r = self._request_url('DELETE', '/artifacts/' + artifact_uuid +
'/versions/' + str(version_id))
return r.json()
def get_networks(self, all=False):
r = self._request_url('GET', '/networks', data={'all': all})
return r.json()
def get_network(self, network_uuid):
r = self._request_url('GET', '/networks/' + network_uuid)
return r.json()
def delete_network(self, network_uuid, namespace=None):
# Why pass a namespace when you're passing an exact UUID? The idea here
# is that it provides a consistent interface, but also a safety check
# against overly zealous loops deleting things.
data = None
if namespace:
data = {'namespace': namespace}
r = self._request_url('DELETE', '/networks/' + network_uuid, data=data)
return r.json()
def delete_all_networks(self, namespace, clean_wait=False):
r = self._request_url('DELETE', '/networks',
data={'confirm': True,
'namespace': namespace,
'clean_wait': clean_wait,
})
return r.json()
def get_network_events(self, instance_uuid):
r = self._request_url('GET', '/networks/' + instance_uuid + '/events')
return r.json()
def allocate_network(self, netblock, provide_dhcp, provide_nat, name, namespace=None):
r = self._request_url('POST', '/networks',
data={
'netblock': netblock,
'provide_dhcp': provide_dhcp,
'provide_nat': provide_nat,
'name': name,
'namespace': namespace
})
n = r.json()
deadline = time.time() + _calculate_async_deadline(self.async_strategy)
while True:
if n['state'] not in ['initial', 'creating']:
return n
LOG.debug('Waiting for network to be created')
if time.time() > deadline:
LOG.debug('Deadline exceeded waiting for network to be created')
return n
time.sleep(1)
n = self.get_network(n['uuid'])
def get_network_interfaces(self, network_uuid):
r = self._request_url('GET', '/networks/' +
network_uuid + '/interfaces')
return r.json()
def get_network_metadata(self, network_uuid):
r = self._request_url('GET', '/networks/' + network_uuid +
'/metadata')
return r.json()
def set_network_metadata_item(self, network_uuid, key, value):
r = self._request_url('PUT', '/networks/' + network_uuid +
'/metadata/' + key, data={'value': value})
return r.json()
def delete_network_metadata_item(self, network_uuid, key):
r = self._request_url('DELETE', '/networks/' + network_uuid +
'/metadata/' + key)
return r.json()
def get_nodes(self):
r = self._request_url('GET', '/nodes')
return r.json()
def get_interface(self, interface_uuid):
r = self._request_url('GET', '/interfaces/' + interface_uuid)
return r.json()
def float_interface(self, interface_uuid):
r = self._request_url('POST', '/interfaces/' + interface_uuid +
'/float')
return r.json()
def defloat_interface(self, interface_uuid):
r = self._request_url('POST', '/interfaces/' + interface_uuid +
'/defloat')
return r.json()
def get_console_data(self, instance_uuid, length=None):
url = '/instances/' + instance_uuid + '/consoledata'
if length:
d = {'length': length}
else:
d = {}
r = self._request_url('GET', url, data=d)
return r.text
def delete_console_data(self, instance_uuid):
url = '/instances/' + instance_uuid + '/consoledata'
self._request_url('DELETE', url)
def get_namespaces(self):
r = self._request_url('GET', '/auth/namespaces')
return r.json()
def create_namespace(self, namespace):
r = self._request_url('POST', '/auth/namespaces',
data={'namespace': namespace})
return r.json()
def delete_namespace(self, namespace):
if not namespace:
namespace = self.namespace
self._request_url('DELETE', '/auth/namespaces/' + namespace)
def get_namespace_keynames(self, namespace):
r = self._request_url('GET', '/auth/namespaces/' + namespace + '/keys')
return r.json()
def add_namespace_key(self, namespace, key_name, key):
r = self._request_url('POST', '/auth/namespaces/' + namespace + '/keys',
data={'key_name': key_name, 'key': key})
return r.json()
def delete_namespace_key(self, namespace, key_name):
self._request_url(
'DELETE', '/auth/namespaces/' + namespace + '/keys/' + key_name)
def get_namespace_metadata(self, namespace):
r = self._request_url('GET', '/auth/namespaces/' + namespace +
'/metadata')
return r.json()
def set_namespace_metadata_item(self, namespace, key, value):
r = self._request_url('PUT', '/auth/namespaces/' + namespace +
'/metadata/' + key, data={'value': value})
return r.json()
def delete_namespace_metadata_item(self, namespace, key):
r = self._request_url(
'DELETE', '/auth/namespaces/' + namespace + '/metadata/' + key)
return r.json()
def get_existing_locks(self):
r = self._request_url('GET', '/admin/locks')
return r.json()
def ping(self, network_uuid, address):
r = self._request_url('GET', '/networks/' +
network_uuid + '/ping/' + address)
return r.json()
def create_upload(self):
r = self._request_url('POST', '/upload')
return r.json()
def send_upload(self, upload_uuid, data):
r = self._request_url('POST', '/upload/' + upload_uuid,
data=data, data_is_binary=True)
return r.json()
def get_user_agent():
sf_version = VersionInfo('shakenfist_client').version_string()
return 'Mozilla/5.0 (Ubuntu; Linux x86_64) Shaken Fist/%s' % sf_version
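# Illustrative end-to-end sketch (editor addition). The endpoint, namespace,
# key, network block and disk/network specs are placeholders; every call maps
# to a method defined above.
if __name__ == "__main__":
    client = Client(base_url='http://sf.example.com:13000',
                    namespace='demo', key='secret',
                    async_strategy=ASYNC_BLOCK)

    net = client.allocate_network('192.168.50.0/24', provide_dhcp=True,
                                  provide_nat=True, name='demo-net')
    inst = client.create_instance(
        name='demo-vm', cpus=1, memory=1024,
        network=[{'network_uuid': net['uuid']}],
        disk=[{'size': 8, 'base': 'ubuntu:20.04'}],
        sshkey=None, userdata=None)

    print('instance %s is %s' % (inst['uuid'], inst['state']))
    print(client.get_console_data(inst['uuid'], length=1000))
    client.delete_instance(inst['uuid'])
    client.delete_network(net['uuid'])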
|
py | 7dfb4b4e1596b913723d8483fa38f478ca4488b8 | import inspect
import os
import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango41Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string, module_has_submodule
APPS_MODULE_NAME = 'apps'
MODELS_MODULE_NAME = 'models'
class AppConfig:
"""Class representing a Django application and its configuration."""
def __init__(self, app_name, app_module):
# Full Python path to the application e.g. 'django.contrib.admin'.
self.name = app_name
# Root module for the application e.g. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.py'>.
self.module = app_module
# Reference to the Apps registry that holds this AppConfig. Set by the
# registry when it registers the AppConfig instance.
self.apps = None
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application e.g. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
if not self.label.isidentifier():
raise ImproperlyConfigured(
"The app label '%s' is not a valid Python identifier." % self.label
)
# Human-readable name for the application e.g. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory e.g.
# '/path/to/django/contrib/admin'.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models e.g. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.py'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lowercase model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
@cached_property
def default_auto_field(self):
from django.conf import settings
return settings.DEFAULT_AUTO_FIELD
@property
def _is_default_auto_field_overridden(self):
return self.__class__.default_auto_field is not AppConfig.default_auto_field
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert to list because __path__ may not support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
else:
# For unknown reasons, sometimes the list returned by __path__
# contains duplicates that must be removed (#25246).
paths = list(set(paths))
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % module)
return paths[0]
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
# create() eventually returns app_config_class(app_name, app_module).
app_config_class = None
app_config_name = None
app_name = None
app_module = None
# If import_module succeeds, entry points to the app module.
try:
app_module = import_module(entry)
except Exception:
pass
else:
# If app_module has an apps submodule that defines a single
# AppConfig subclass, use it automatically.
# To prevent this, an AppConfig subclass can declare a class
# variable default = False.
# If the apps module defines more than one AppConfig subclass,
# the default one can declare default = True.
if module_has_submodule(app_module, APPS_MODULE_NAME):
mod_path = '%s.%s' % (entry, APPS_MODULE_NAME)
mod = import_module(mod_path)
# Check if there's exactly one AppConfig candidate,
# excluding those that explicitly define default = False.
app_configs = [
(name, candidate)
for name, candidate in inspect.getmembers(mod, inspect.isclass)
if (
issubclass(candidate, cls) and
candidate is not cls and
getattr(candidate, 'default', True)
)
]
if len(app_configs) == 1:
app_config_class = app_configs[0][1]
app_config_name = '%s.%s' % (mod_path, app_configs[0][0])
else:
# Check if there's exactly one AppConfig subclass,
# among those that explicitly define default = True.
app_configs = [
(name, candidate)
for name, candidate in app_configs
if getattr(candidate, 'default', False)
]
if len(app_configs) > 1:
candidates = [repr(name) for name, _ in app_configs]
raise RuntimeError(
'%r declares more than one default AppConfig: '
'%s.' % (mod_path, ', '.join(candidates))
)
elif len(app_configs) == 1:
app_config_class = app_configs[0][1]
app_config_name = '%s.%s' % (mod_path, app_configs[0][0])
# If app_module specifies a default_app_config, follow the link.
# default_app_config is deprecated, but still takes over the
# automatic detection for backwards compatibility during the
# deprecation period.
try:
new_entry = app_module.default_app_config
except AttributeError:
# Use the default app config class if we didn't find anything.
if app_config_class is None:
app_config_class = cls
app_name = entry
else:
message = (
'%r defines default_app_config = %r. ' % (entry, new_entry)
)
if new_entry == app_config_name:
message += (
'Django now detects this configuration automatically. '
'You can remove default_app_config.'
)
else:
message += (
"However, Django's automatic detection %s. You should "
"move the default config class to the apps submodule "
"of your application and, if this module defines "
"several config classes, mark the default one with "
"default = True." % (
"picked another configuration, %r" % app_config_name
if app_config_name
else "did not find this configuration"
)
)
warnings.warn(message, RemovedInDjango41Warning, stacklevel=2)
entry = new_entry
app_config_class = None
# If import_string succeeds, entry is an app config class.
if app_config_class is None:
try:
app_config_class = import_string(entry)
except Exception:
pass
# If both import_module and import_string failed, it means that entry
# doesn't have a valid value.
if app_module is None and app_config_class is None:
# If the last component of entry starts with an uppercase letter,
# then it was likely intended to be an app config class; if not,
# an app module. Provide a nice error message in both cases.
mod_path, _, cls_name = entry.rpartition('.')
if mod_path and cls_name[0].isupper():
# We could simply re-trigger the string import exception, but
# we're going the extra mile and providing a better error
# message for typos in INSTALLED_APPS.
# This may raise ImportError, which is the best exception
# possible if the module at mod_path cannot be imported.
mod = import_module(mod_path)
candidates = [
repr(name)
for name, candidate in inspect.getmembers(mod, inspect.isclass)
if issubclass(candidate, cls) and candidate is not cls
]
msg = "Module '%s' does not contain a '%s' class." % (mod_path, cls_name)
if candidates:
msg += ' Choices are: %s.' % ', '.join(candidates)
raise ImportError(msg)
else:
# Re-trigger the module import exception.
import_module(entry)
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(app_config_class, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
if app_name is None:
try:
app_name = app_config_class.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry
)
# Ensure app_name points to a valid module.
try:
app_module = import_module(app_name)
except ImportError:
raise ImproperlyConfigured(
"Cannot import '%s'. Check that '%s.%s.name' is correct." % (
app_name,
app_config_class.__module__,
app_config_class.__qualname__,
)
)
# Entry is a path to an app config class.
return app_config_class(app_name, app_module)
def get_model(self, model_name, require_ready=True):
"""
Return the model with the given case-insensitive model_name.
Raise LookupError if no model exists with this name.
"""
if require_ready:
self.apps.check_models_ready()
else:
self.apps.check_apps_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Return an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.apps.check_models_ready()
for model in self.models.values():
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
self.models = self.apps.all_models[self.label]
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
|
py | 7dfb4b98da1e78d55f588b31b2db23edcc320ef4 | import requests,base64,json,hashlib
from Crypto.Cipher import AES
def encrypt(key, text):
cryptor = AES.new(key.encode('utf8'), AES.MODE_CBC, b'0102030405060708')
length = 16
count = len(text.encode('utf-8'))
if (count % length != 0):
add = length - (count % length)
else:
add = 16
pad = chr(add)
text1 = text + (pad * add)
ciphertext = cryptor.encrypt(text1.encode('utf8'))
cryptedStr = str(base64.b64encode(ciphertext),encoding='utf-8')
return cryptedStr
def md5(str):
hl = hashlib.md5()
hl.update(str.encode(encoding='utf-8'))
return hl.hexdigest()
def protect(text):
return {"params":encrypt('TA3YiYCfY2dDJQgg',encrypt('0CoJUm6Qyw8W8jud',text)),"encSecKey":"84ca47bca10bad09a6b04c5c927ef077d9b9f1e37098aa3eac6ea70eb59df0aa28b691b7e75e4f1f9831754919ea784c8f74fbfadf2898b0be17849fd656060162857830e241aba44991601f137624094c114ea8d17bce815b0cd4e5b8e2fbaba978c6d1d14dc3d1faf852bdd28818031ccdaaa13a6018e1024e2aae98844210"}
s=requests.Session()
header={}
url="https://music.163.com/weapi/login/cellphone"
url2="https://music.163.com/weapi/point/dailyTask"
url3="https://music.163.com/weapi/v1/discovery/recommend/resource"
logindata={
"phone":input(),
"countrycode":"86",
"password":md5(input()),
"rememberLogin":"true",
}
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
"Referer" : "http://music.163.com/",
"Accept-Encoding" : "gzip, deflate",
}
headers2 = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
"Referer" : "http://music.163.com/",
"Accept-Encoding" : "gzip, deflate",
"Cookie":"os=pc; osver=Microsoft-Windows-10-Professional-build-10586-64bit; appver=2.0.3.131777; channel=netease; __remember_me=true;"
}
res=s.post(url=url,data=protect(json.dumps(logindata)),headers=headers2)
tempcookie=res.cookies
object=json.loads(res.text)
if object['code']==200:
print("登录成功!")
else:
print("登录失败!请检查密码是否正确!"+str(object['code']))
exit(object['code'])
res=s.post(url=url2,data=protect('{"type":0}'),headers=headers)
object=json.loads(res.text)
if object['code']!=200 and object['code']!=-2:
print("签到时发生错误:"+object['msg'])
else:
if object['code']==200:
print("签到成功,经验+"+str(object['point']))
else:
print("重复签到")
res=s.post(url=url3,data=protect('{"csrf_token":"'+requests.utils.dict_from_cookiejar(tempcookie)['__csrf']+'"}'),headers=headers)
object=json.loads(res.text,strict=False)
for x in object['recommend']:
url='https://music.163.com/weapi/v3/playlist/detail?csrf_token='+requests.utils.dict_from_cookiejar(tempcookie)['__csrf']
data={
'id':x['id'],
'n':1000,
'csrf_token':requests.utils.dict_from_cookiejar(tempcookie)['__csrf'],
}
res=s.post(url,protect(json.dumps(data)),headers=headers)
object=json.loads(res.text,strict=False)
buffer=[]
count=0
for j in object['playlist']['trackIds']:
data2={}
data2["action"]="play"
data2["json"]={}
data2["json"]["download"]=0
data2["json"]["end"]="playend"
data2["json"]["id"]=j["id"]
data2["json"]["sourceId"]=""
data2["json"]["time"]="240"
data2["json"]["type"]="song"
data2["json"]["wifi"]=0
buffer.append(data2)
count+=1
if count>=310:
break
if count>=310:
break
url = "http://music.163.com/weapi/feedback/weblog"
postdata={
"logs":json.dumps(buffer)
}
res=s.post(url,protect(json.dumps(postdata)))
object=json.loads(res.text,strict=False)
if object['code']==200:
print("刷单成功!共"+str(count)+"首")
exit()
else:
print("发生错误:"+str(object['code'])+object['message'])
exit(object['code'])
|
py | 7dfb4be33f7aba757171644dbe4a8b24876a1245 | if __name__ == '__main__':
from userbot import main
|
py | 7dfb4cb0cb620c1eaeb890201b48b36fba3bad1b | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 23:14:11 2019
@author: Parikshith.H
"""
class vehicle:
    num=10 # class variable (static variable) - assigned a value inside the class declaration
    # a static variable does not belong to any one object; it belongs to the class
    # a static variable is shared among all objects of the class
    # static variables exist throughout the program
def __init__(self,w,t):
self.__wheel=w
self.__type=t
self.__number=vehicle.num
vehicle.num=vehicle.num+1
def display(self):
print(self.__wheel,self.__type,self.__number)
print(vehicle.num)
car1=vehicle(4,'petrol')
car1.display()
# =============================================================================
# #output:
# 10
# 4 petrol 10
# =============================================================================
print(vehicle.num)
car2=vehicle(4,'diesel')
car2.display()
# =============================================================================
# #output:
# 11
# 4 diesel 11
# =============================================================================
print(vehicle.num)
# =============================================================================
# #output:
# 12
# =============================================================================
class student:
id=1
def __init__(self,n,ph):
self.__name=n
self.__phone=ph
self.__usn='4VV16CS' + str(student.id)
student.id=student.id+1
def display(self):
print(self.__name,self.__phone,self.__usn)
student1=student('a',9988776655)
student1.display()
# =============================================================================
# #output:
# a 9988776655 4VV16CS1
# =============================================================================
student2=student('b',9966554433)
student2.display()
# =============================================================================
# #output:
# b 9966554433 4VV16CS2
# =============================================================================
|
py | 7dfb4d9f5e61625f583cfc6211149f533f9462df | import numpy as np
import scipy.signal
from gym.spaces import Box, Discrete
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
def combined_shape(length, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def mlp(sizes, activation, output_activation=nn.Identity):
layers = []
for j in range(len(sizes)-1):
act = activation if j < len(sizes)-2 else output_activation
layers += [nn.Linear(sizes[j], sizes[j+1]), act()]
return nn.Sequential(*layers)
class split_model(nn.Module):
def __init__(self,sizes, activation, output_activation=nn.Identity,split_index=-1):
super(split_model, self).__init__()
self.mlp1 = mlp(sizes, activation, output_activation)
self.mlp2 = mlp(sizes, activation, output_activation)
self.split_index = split_index
def forward(self, x):
if len(list(x.size()))==1:
x = x.unsqueeze(0)
split = torch.eq(x[:,self.split_index],1).unsqueeze(1)
x = split.float()*self.mlp1(x)+torch.logical_not(split).float()*self.mlp2(x)
return x
def mlp_switch(sizes, activation, output_activation=nn.Identity,split_index=-1):
return split_model(sizes, activation, output_activation,split_index=split_index)
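# Hedged example (added; sizes and batch shape are arbitrary): mlp() stacks
# Linear+activation pairs, and mlp_switch() builds two such networks and routes
# each input row through one of them depending on whether the feature at
# split_index (the last column by default) equals 1.
_demo_net = mlp_switch([4, 32, 2], nn.Tanh)   # two parallel 4 -> 32 -> 2 MLPs
_demo_out = _demo_net(torch.zeros(8, 4))      # last column is 0, so mlp2 handles every row
assert _demo_out.shape == (8, 2)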
def count_vars(module):
return sum([np.prod(p.shape) for p in module.parameters()])
def discount_cumsum(x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
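# Hedged sanity check (added): for x = [1, 1, 1] and discount = 0.5 the definition
# in the docstring gives [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0], which is
# exactly what the reversed-lfilter trick above computes.
assert np.allclose(discount_cumsum(np.array([1.0, 1.0, 1.0]), 0.5),
                   [1.75, 1.5, 1.0])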
class Actor(nn.Module):
def _distribution(self, obs):
raise NotImplementedError
def _log_prob_from_distribution(self, pi, act):
raise NotImplementedError
def forward(self, obs, act=None):
# Produce action distributions for given observations, and
# optionally compute the log likelihood of given actions under
# those distributions.
pi = self._distribution(obs)
logp_a = None
if act is not None:
logp_a = self._log_prob_from_distribution(pi, act)
return pi, logp_a
class MLPCategoricalActor(Actor):
def __init__(self, obs_dim, act_dim, hidden_sizes, activation,use_split=False):
super().__init__()
if not use_split:
self.logits_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)
else:
self.logits_net = mlp_switch([obs_dim] + list(hidden_sizes) + [act_dim], activation)
def _distribution(self, obs):
logits = self.logits_net(obs)
return Categorical(logits=logits)
def _log_prob_from_distribution(self, pi, act):
return pi.log_prob(act)
class MLPGaussianActor(Actor):
def __init__(self, obs_dim, act_dim, hidden_sizes, activation,use_split=False):
super().__init__()
log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
if not use_split:
self.mu_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)
else:
self.mu_net = mlp_switch([obs_dim] + list(hidden_sizes) + [act_dim], activation)
def _distribution(self, obs):
mu = self.mu_net(obs)
std = torch.exp(self.log_std)
return Normal(mu, std)
def _log_prob_from_distribution(self, pi, act):
return pi.log_prob(act).sum(axis=-1) # Last axis sum needed for Torch Normal distribution
class MLPCritic(nn.Module):
def __init__(self, obs_dim, hidden_sizes, activation,use_split=False):
super().__init__()
if not use_split:
self.v_net = mlp([obs_dim] + list(hidden_sizes) + [1], activation)
else:
self.v_net = mlp_switch([obs_dim] + list(hidden_sizes) + [1], activation)
def forward(self, obs):
return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.
class MLPActorCritic(nn.Module):
def __init__(self, observation_space, action_space,
hidden_sizes=(64,64), activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
# policy builder depends on action space
if isinstance(action_space, Box):
self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation)
elif isinstance(action_space, Discrete):
self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation)
# build value function
self.v = MLPCritic(obs_dim, hidden_sizes, activation)
def step(self, obs):
with torch.no_grad():
pi = self.pi._distribution(obs)
a = pi.sample()
logp_a = self.pi._log_prob_from_distribution(pi, a)
v = self.v(obs)
return a.numpy(), v.numpy(), logp_a.numpy()
def act(self, obs):
return self.step(obs)[0]
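# Hedged usage sketch (added; the environment name is illustrative and gym itself
# is assumed importable, as suggested by the gym.spaces import above):
#
#     import gym
#     env = gym.make('CartPole-v1')
#     ac = MLPActorCritic(env.observation_space, env.action_space)
#     obs = env.reset()
#     a, v, logp = ac.step(torch.as_tensor(obs, dtype=torch.float32))
#
# step() samples an action from the policy, returns the critic's value estimate
# and the action's log-probability, all converted to numpy arrays.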
class MLPActorCriticSplit(nn.Module):
def __init__(self, observation_space, action_space,
hidden_sizes=(64,64), activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
# policy builder depends on action space
if isinstance(action_space, Box):
self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation,use_split=True)
elif isinstance(action_space, Discrete):
self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation,use_split=True)
# build value function
self.v = MLPCritic(obs_dim, hidden_sizes, activation,use_split=True)
def step(self, obs):
with torch.no_grad():
pi = self.pi._distribution(obs)
a = pi.sample()
logp_a = self.pi._log_prob_from_distribution(pi, a)
v = self.v(obs)
return a.numpy(), v.numpy(), logp_a.numpy()
def act(self, obs):
return self.step(obs)[0] |
py | 7dfb4e2b76dbe808daef4748d2c650e81c560300 | r"""``sphobjinv._vendored`` *package definition module*.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
Subpackage marker module for vendored packages.
**Author**
Brian Skinn ([email protected])
**File Created**
11 Dec 2021
**Copyright**
\(c) Brian Skinn 2016-2022
**Source Repository**
https://github.com/bskinn/sphobjinv
**Documentation**
https://sphobjinv.readthedocs.io/en/latest
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
""" |
py | 7dfb4e6f29a00ad42e0fd96496208166c92cc6fc | small_bottles = float(input("Enter the number of small bottles"))
big_bottles = float(input("Enter the number of large bottles"))
valore_totale=(small_bottles*0.1)+(big_bottles*0.25)
print("hai guadagnato:",float(valore_totale)) |
py | 7dfb4eb2df81fb7cd4de3ab58b11bddd1f89fcc1 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import requests
ensembl_server = 'http://rest.ensembl.org'
def do_request(server, service, *args, **kwargs):
url_params = ''
for a in args:
if a is not None:
url_params += '/' + a
req = requests.get('%s/%s%s' % (server, service, url_params),
params=kwargs,
headers={'Content-Type': 'application/json'})
if not req.ok:
req.raise_for_status()
return req.json()
# -
answer = do_request(ensembl_server, 'info/species')
for i, sp in enumerate(answer['species']):
print(i, sp['name'])
ext_dbs = do_request(ensembl_server, 'info/external_dbs', 'homo_sapiens', filter='HGNC%')
print(ext_dbs)
answer = do_request(ensembl_server, 'lookup/symbol', 'homo_sapiens', 'LCT')
print(answer)
lct_id = answer['id']
lct_seq = do_request(ensembl_server, 'sequence/id', lct_id)
print(lct_seq)
lct_xrefs = do_request(ensembl_server, 'xrefs/id', lct_id)
for xref in lct_xrefs:
print(xref['db_display_name'])
print(xref)
refs = do_request(ensembl_server, 'xrefs/id', lct_id, external_db='GO', all_levels='1')
print(lct_id, refs)
hom_response = do_request(ensembl_server, 'homology/id', lct_id, type='orthologues', sequence='none')
#print(hom_response['data'][0]['homologies'])
homologies = hom_response['data'][0]['homologies']
for homology in homologies:
print(homology['target']['species'])
if homology['target']['species'] != 'equus_caballus':
continue
print(homology)
print(homology['taxonomy_level'])
horse_id = homology['target']['id']
horse_req = do_request(ensembl_server, 'lookup/id', horse_id)
print(horse_req)
# +
#maybe synteny of MCM6 and LCT with caballus and gorilla
|
py | 7dfb4ee8ce4ac0e661e3c5aefd5c86db8d6e3159 | # -*- coding: utf-8 -*-
"""
Tools for dealing with bibliographic information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .bibtex import BibtexParser, parse_bibtex
from .person import PersonName
from .xmp import XmpParser, parse_xmp
|
py | 7dfb4ef28bf7c47a35d6fe8dfa51bfe3200ed1d2 | import logging
import re
import six # Python 2+3 compatibility
from .glyphlist import glyphname2unicode
from .latin_enc import ENCODING
from .psparser import PSLiteral
from .additional_glyphlist import glyphname2unicode as additional_glyphs
glyphname2unicode.update(additional_glyphs)
HEXADECIMAL = re.compile(r'[0-9a-fA-F]+')
log = logging.getLogger(__name__)
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers.
    In contrast to the specification, this raises a KeyError instead of returning an empty string when the key is unknown.
    This way the caller must explicitly decide what to do when there is no match.
    Reference: https://github.com/adobe-type-tools/agl-specification#2-the-mapping
    :returns: the unicode character(s) if the name can be resolved, otherwise raises a KeyError
"""
if name in glyphname2unicode:
return glyphname2unicode.get(name)
name = name.split('.')[0]
components = name.split('_')
if len(components) > 1:
return ''.join(map(name2unicode, components))
elif name.startswith('uni'):
name_without_uni = name.strip('uni')
if HEXADECIMAL.match(name_without_uni) and len(name_without_uni) % 4 == 0:
unicode_digits = [int(name_without_uni[i:i + 4], base=16) for i in range(0, len(name_without_uni), 4)]
for digit in unicode_digits:
raise_key_error_for_invalid_unicode(digit)
characters = map(six.unichr, unicode_digits)
return ''.join(characters)
elif name.startswith('u'):
name_without_u = name.strip('u')
if HEXADECIMAL.match(name_without_u) and 4 <= len(name_without_u) <= 6:
unicode_digit = int(name_without_u, base=16)
raise_key_error_for_invalid_unicode(unicode_digit)
return six.unichr(unicode_digit)
# taken from:
# https://github.com/apache/pdfbox/blob/3d6d6631b50b92d864f07068cb1566f8e4bec9ab/pdfbox/src/main/java/org/apache/pdfbox/encoding/Encoding.java
# this encoding is used in pdfs generated with TeX/LateX
elif len(name) <= 4 and (name.startswith("x") or name.startswith("a")):
try:
value = int(name[1:], base=16 if name.startswith("x") else 10)
# add some additional mapping for values < 32 and = 127
if 0 <= value <= 9:
value += 161
elif 10 <= value < 32:
value += 163
elif value == 127:
value = 196
character = chr(value)
glyphname2unicode[name] = character
return character
except ValueError:
log.debug("Not a number in character name: " + name)
raise KeyError('Could not convert unicode name "%s" to character because it does not match specification' % name)
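# Hedged examples (added; these names follow the AGL patterns handled above and are
# not claimed to appear in glyphlist.txt):
assert name2unicode('uni0041') == 'A'           # 'uni' + 4 hex digits -> U+0041
assert name2unicode('u0042') == 'B'             # 'u' + 4-6 hex digits -> U+0042
assert name2unicode('uni0041_uni0042') == 'AB'  # '_'-joined components resolved piecewise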
def raise_key_error_for_invalid_unicode(unicode_digit):
"""Unicode values should not be in the range D800 through DFFF because that is used for surrogate pairs in UTF-16
:raises KeyError if unicode digit is invalid
"""
if 55295 < unicode_digit < 57344:
raise KeyError('Unicode digit %d is invalid because it is in the range D800 through DFFF' % unicode_digit)
class EncodingDB(object):
std2unicode = {}
mac2unicode = {}
win2unicode = {}
pdf2unicode = {}
for (name, std, mac, win, pdf) in ENCODING:
c = name2unicode(name)
if std:
std2unicode[std] = c
if mac:
mac2unicode[mac] = c
if win:
win2unicode[win] = c
if pdf:
pdf2unicode[pdf] = c
encodings = {
'StandardEncoding': std2unicode,
'MacRomanEncoding': mac2unicode,
'WinAnsiEncoding': win2unicode,
'PDFDocEncoding': pdf2unicode,
}
@classmethod
def get_encoding(klass, name, diff=None):
cid2unicode = klass.encodings.get(name, klass.std2unicode)
if diff:
cid2unicode = cid2unicode.copy()
cid = 0
for x in diff:
if isinstance(x, int):
cid = x
elif isinstance(x, PSLiteral):
try:
cid2unicode[cid] = name2unicode(x.name)
except KeyError as e:
log.debug(str(e))
cid += 1
return cid2unicode
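# Hedged example (added): each table maps single-byte character codes to text, and
# unknown encoding names fall back to StandardEncoding.
assert EncodingDB.get_encoding('WinAnsiEncoding')[65] == 'A'
assert EncodingDB.get_encoding('NoSuchEncoding') is EncodingDB.std2unicode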
|
py | 7dfb4f7c7576bb983a89105cd460fa43e3bce1a8 | from django import template
import re
from django.forms.fields import DateField
register = template.Library()
###
### Counts the total of probe readings and water history
### records (forms) in a unified_field_data.UnifiedReport record
@register.filter(expects_localtime=True)
def day_records(day_records):
return len(day_records.all_forms)
@register.filter(expects_localtime=True)
def form_index(form_id):
m = re.search( "\d+", str(form_id))
return m.group(0)
@register.filter(expects_localtime=True)
def time_format(time_str):
## Easier and probably faster than dealing with Python's datetime class
m = re.search("(\d+:\d+)", str(time_str))
return m.group(0)
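# Hedged examples (added; the argument values are invented): the filters are plain
# functions, so they can be exercised directly outside a template.
assert form_index('id_form-27') == '27'
assert time_format('2019-06-08 14:30:00') == '14:30'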
|
py | 7dfb4f7e6f8d6e99d854b37db22e935bb99c2e61 | # -*- coding: UTF-8 -*-
"""
A scraper utility package
"""
__author__ = "the01"
__email__ = "[email protected]"
__copyright__ = "Copyright (C) 2014-19, Florian JUNG"
__license__ = "MIT"
__version__ = "0.3.1"
__date__ = "2019-08-04"
from .webscraper import WebScraper, default_user_agents, \
WEBConnectException, WEBFileException, WEBParameterException
from .cache import Cache
from .models import Response, CacheInfo
__all__ = [
"webscraper", "WebScraper", "Cache", "Response", "CacheInfo"
]
|
py | 7dfb503cb9ad540fab1cccae20866c1886019ac8 | # coding: utf-8
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
from guitar.env import *
from kerasy.utils import chooseTextColor, handleTypeError
from ..env import LEN_OCTAVES
def get_notes2color(theme="rainbow"):
notes2color={}
for i,note in enumerate(NOTES):
rgba = cm.cmap_d.get(theme)(i/LEN_OCTAVES)
notes2color[note] = (rgba, chooseTextColor(rgb=rgba[:3], ctype="rgb", max_val=1))
return notes2color
def plot_notes_color_theme(theme="rainbow", radius=0.3, fontsize=20, title=True, ax=None, fig=None):
if isinstance(theme, matplotlib.colors.ListedColormap) or \
isinstance(theme, matplotlib.colors.LinearSegmentedColormap):
cmap = theme
theme = cmap.name
elif isinstance(theme, str):
cmap = cm.cmap_d.get(theme)
else:
handleTypeError(types=[str, matplotlib.colors.ListedColormap, matplotlib.colors.LinearSegmentedColormap], theme=theme)
if ax is None:
fig, ax = plt.subplots(figsize=(LEN_OCTAVES,1))
ax.set_xlim(-0.5, 11.5)
if title: ax.set_title(theme)
# Plot notes with color.
for i,note in enumerate(NOTES):
rgba = cmap(i/LEN_OCTAVES)
fc = chooseTextColor(rgba[:3], ctype="rgb", max_val=1)
ax.add_patch(mpatches.Circle(xy=(i, 0), radius=radius, color=rgba))
ax.annotate(text=note, xy=(i, 0), color=fc, weight='bold', fontsize=fontsize, ha='center', va='center')
# Adjust for different sized figures.
if fig is not None:
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
w,h = bbox.width, bbox.height
height = 1/2 * h * (LEN_OCTAVES/w)
ax.set_ylim(-height, height)
return ax
def plot_notes_all_color_theme(radius=0.3, fontsize=20):
for name, cmap in cm.cmap_d.items():
plot_notes_color_theme(cmap, radius=radius, fontsize=fontsize) |
py | 7dfb516b1d1ecae8683d75152f7d568733a769ff | import pandas as pd
import folium.folium as folium
import itertools
import numpy as np
import logging
import geojson as gj
import copy
import attrdict as ad
# import emission.analysis.classification.cleaning.location_smoothing as ls
import bson.json_util as bju
import emission.storage.decorations.location_queries as lq
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.place_queries as esdp
import emission.storage.decorations.stop_queries as esds
import emission.storage.decorations.section_queries as esdsc
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.stop as ecws
import emission.core.wrapper.section as ecwsc
import emission.analysis.plotting.geojson.geojson_feature_converter as gfc
import emission.analysis.plotting.leaflet_osm.folium_geojson_plugin as fgjp
import emission.storage.timeseries.timequery as estt
import emission.net.api.usercache as enau
all_color_list = ['black', 'brown', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'navy', 'pink', 'purple', 'red', 'snow', 'yellow']
sel_color_list = ['black', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'pink', 'purple', 'red', 'yellow']
def df_to_string_list(df):
"""
Convert the input df into a list of strings, suitable for using as popups in a map.
This is a utility function.
"""
# print "Converting df with size %s to string list" % df.shape[0]
array_list = df.to_dict(orient='records')
return [str(line) for line in array_list]
def get_maps_for_range(user_id, start_ts, end_ts):
map_list = []
geojson_list = gfc.get_geojson_for_ts(user_id, start_ts, end_ts)
return get_maps_for_geojson_list(geojson_list)
def get_maps_for_usercache(user_id):
from functional import seq
data_to_phone = seq(enau.sync_server_to_phone(user_id))
logging.debug("Before pipeline, trips to phone list has length %d" % len(data_to_phone.to_list()))
logging.debug("keys are %s" % data_to_phone.map(lambda e: ad.AttrDict(e).metadata.key))
trips_to_phone = data_to_phone.map(lambda e: ad.AttrDict(e))\
.filter(lambda e: e.metadata.key.startswith("diary/trips")) \
.map(lambda e: e.data)
logging.debug("After pipeline, trips to phone list has length %d" % len(trips_to_phone.to_list()))
# logging.debug("trips_to_phone = %s" % trips_to_phone)
maps_for_day = []
for day in trips_to_phone:
maps_for_day.append(get_maps_for_geojson_list(day))
return maps_for_day
def get_maps_for_geojson_list(trip_geojson_list):
map_list = []
for trip_doc in trip_geojson_list:
# logging.debug(trip_doc)
trip_geojson = ad.AttrDict(trip_doc)
logging.debug("centering based on start = %s, end = %s " % (trip_geojson.features[0], trip_geojson.features[1]))
flipped_midpoint = lambda(p1, p2): [(p1.coordinates[1] + p2.coordinates[1])/2,
(p1.coordinates[0] + p2.coordinates[0])/2]
curr_map = folium.Map(flipped_midpoint((trip_geojson.features[0].geometry,
trip_geojson.features[1].geometry)))
curr_plugin = fgjp.FoliumGeojsonPlugin(dict(trip_geojson))
curr_map.add_plugin(curr_plugin)
map_list.append(curr_map)
return map_list
def flipped(coord):
return (coord[1], coord[0])
def get_center_for_map(coords):
# logging.debug(trip_geojson)
midpoint = lambda(p1, p2): [(p1[0] + p2[0])/2,
(p1[1] + p2[1])/2]
if len(coords) == 0:
return None
if len(coords) == 1:
return flipped(coords)
if len(coords) > 0:
logging.debug("Getting midpoint of %s and %s" % (coords[0], coords[-1]))
return flipped(midpoint((coords[0], coords[-1])))
def get_maps_for_geojson_unsectioned(feature_list):
map_list = []
for feature in feature_list:
# logging.debug("Getting map for feature %s" % bju.dumps(feature))
feature_coords = list(get_coords(feature))
# feature_coords = list(gj.utils.coords(feature))
curr_map = folium.Map(get_center_for_map(feature_coords))
curr_plugin = fgjp.FoliumGeojsonPlugin(dict(feature))
curr_map.add_plugin(curr_plugin)
map_list.append(curr_map)
return map_list
def get_coords(feature):
# logging.debug("Getting coordinates for feature %s" % bju.dumps(feature))
if feature["type"] == "FeatureCollection":
retVal = []
for f in feature["features"]:
retVal.extend(get_coords(f))
return retVal
else:
return gj.utils.coords(feature)
def get_maps_for_range_old(user_id, start_ts, end_ts):
# First, get the timeline for that range.
ts = esta.TimeSeries.get_time_series(user_id)
trip_list = esdt.get_trips(user_id, estt.TimeQuery("data.start_ts", start_ts, end_ts))
# TODO: Should the timeline support random access as well?
# If it did, we wouldn't need this additional map
# I think that it would be good to support a doubly linked list, i.e. prev and next in addition
# to the iteration interface
place_list = esdp.get_places(user_id, estt.TimeQuery("data.exit_ts", start_ts, end_ts))
place_list = place_list + (esdp.get_places(user_id, estt.TimeQuery("data.enter_ts", start_ts, end_ts)))
place_map = dict([(p.get_id(), p) for p in place_list])
map_list = []
flipped_midpoint = lambda(p1, p2): [(p1.coordinates[1] + p2.coordinates[1])/2,
(p1.coordinates[0] + p2.coordinates[0])/2]
for i, trip in enumerate(trip_list):
logging.debug("-" * 20 + trip.start_fmt_time + "=>" + trip.end_fmt_time
+ "(" + str(trip.end_ts - trip.start_ts) + ")")
if (len(esdt.get_raw_sections_for_trip(user_id, trip.get_id())) == 0 and
len(esdt.get_raw_stops_for_trip(user_id, trip.get_id())) == 0):
logging.debug("Skipping trip because it has no stops and no sections")
continue
start_point = gj.GeoJSON.to_instance(trip.start_loc)
end_point = gj.GeoJSON.to_instance(trip.end_loc)
curr_map = folium.Map(flipped_midpoint((start_point, end_point)))
map_list.append(curr_map)
logging.debug("About to display places %s and %s" % (trip.start_place, trip.end_place))
update_place(curr_map, trip.start_place, place_map, marker_color='green')
update_place(curr_map, trip.end_place, place_map, marker_color='red')
# TODO: Should get_timeline_for_trip work on a trip_id or on a trip object
# it seems stupid to convert trip object -> id -> trip object
curr_trip_timeline = esdt.get_raw_timeline_for_trip(user_id, trip.get_id())
for i, trip_element in enumerate(curr_trip_timeline):
# logging.debug("Examining element %s of type %s" % (trip_element, type(trip_element)))
if type(trip_element) == ecws.Stop:
time_query = esds.get_time_query_for_stop(trip_element.get_id())
logging.debug("time_query for stop %s = %s" % (trip_element, time_query))
stop_points_df = ts.get_data_df("background/filtered_location", time_query)
# logging.debug("stop_points_df.head() = %s" % stop_points_df.head())
if len(stop_points_df) > 0:
update_line(curr_map, stop_points_df, line_color = sel_color_list[-1],
popup="%s -> %s" % (trip_element.enter_fmt_time, trip_element.exit_fmt_time))
else:
assert(type(trip_element) == ecwsc.Section)
time_query = esdsc.get_time_query_for_section(trip_element.get_id())
logging.debug("time_query for section %s = %s" %
(trip_element, "[%s,%s,%s]" % (time_query.timeType, time_query.startTs, time_query.endTs)))
section_points_df = ts.get_data_df("background/filtered_location", time_query)
logging.debug("section_points_df.tail() = %s" % section_points_df.tail())
if len(section_points_df) > 0:
update_line(curr_map, section_points_df, line_color = sel_color_list[trip_element.sensed_mode.value],
popup="%s (%s -> %s)" % (trip_element.sensed_mode, trip_element.start_fmt_time,
trip_element.end_fmt_time))
else:
logging.warn("found no points for section %s" % trip_element)
return map_list
def update_place(curr_map, place_id, place_map, marker_color='blue'):
if place_id is not None and place_id in place_map:
place = place_map[place_id]
logging.debug("Retrieved place %s" % place)
if hasattr(place, "location"):
coords = copy.copy(place.location.coordinates)
coords.reverse()
logging.debug("Displaying place at %s" % coords)
curr_map.simple_marker(location=coords, popup=str(place), marker_color=marker_color)
else:
logging.debug("starting place has no location, skipping")
else:
logging.warn("place not mapped because place_id = %s and place_id in place_map = %s" % (place_id, place_id in place_map))
def update_line(currMap, line_points, line_color = None, popup=None):
currMap.div_markers(line_points[['latitude', 'longitude']].as_matrix().tolist(),
df_to_string_list(line_points), marker_size=5)
currMap.line(line_points[['latitude', 'longitude']].as_matrix().tolist(),
line_color = line_color,
popup = popup)
##########################
# Everything below this line is from the time when we were evaluating
# segmentation and can potentially be deleted. It is also likely to have bitrotted.
# Let's hold off a bit on that until we have the replacement, though
##########################
def get_map_list(df, potential_splits):
mapList = []
potential_splits_list = list(potential_splits)
for start, end in zip(potential_splits_list, potential_splits_list[1:]):
trip = df[start:end]
print "Considering trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[start], df.formatted_time.loc[end], start, end)
if end - start < 4:
# If there are only 3 entries, that means that there is only one
# point other than the start and the end, bail
print "Ignoring trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[start], df.formatted_time.loc[end], start, end)
continue
mapList.append(get_map(trip))
return mapList
def get_map_list_after_segmentation(section_map, outlier_algo = None, filter_algo = None):
mapList = []
for trip, section_list in section_map:
logging.debug("%s %s -> %s %s" % ("=" * 20, trip.start_time, trip.end_time, "=" * 20))
trip_df = lq.get_points_for_section(trip)
curr_map = folium.Map([trip_df.mLatitude.mean(), trip_df.mLongitude.mean()])
last_section_end = None
for (i, section) in enumerate(section_list):
logging.debug("%s %s: %s -> %s %s" %
("-" * 20, i, section.start_time, section.end_time, "-" * 20))
raw_section_df = trip_df[np.logical_and(trip_df.mTime >= section.start_ts,
trip_df.mTime <= section.end_ts)]
section_df = ls.filter_points(raw_section_df, outlier_algo, filter_algo)
if section_df.shape[0] == 0:
logging.info("Found empty df! skipping...")
continue
logging.debug("for section %s, section_df.shape = %s, formatted_time.head() = %s" %
(section, section_df.shape, section_df["formatted_time"].head()))
update_map(curr_map, section_df, line_color = sel_color_list[section.activity.value],
popup = "%s" % (section.activity))
if section_df.shape[0] > 0:
curr_section_start = section_df.iloc[0]
if i != 0 and last_section_end is not None:
# We want to join this to the previous section.
curr_map.line([[last_section_end.mLatitude, last_section_end.mLongitude],
[curr_section_start.mLatitude, curr_section_start.mLongitude]],
line_color = sel_color_list[-1],
popup = "%s -> %s" % (section_list[i-1].activity, section.activity))
last_section_end = section_df.iloc[-1]
mapList.append(curr_map)
return mapList
def get_map(section_points, line_color = None, popup=None):
currMap = folium.Map([section_points.mLatitude.mean(), section_points.mLongitude.mean()])
update_map(currMap, section_points, line_color, popup)
return currMap
def update_map(currMap, section_points, line_color = None, popup=None):
currMap.div_markers(section_points[['mLatitude', 'mLongitude']].as_matrix().tolist(),
df_to_string_list(section_points), marker_size=5)
currMap.line(section_points[['mLatitude', 'mLongitude']].as_matrix().tolist(),
line_color = line_color,
popup = popup)
def evaluate_filtering(section_list, outlier_algos, filtering_algos):
"""
TODO: Is this the best place for this? If not, what is?
It almost seems like we need to have a separate evaluation module that is
separate from the plotting and the calculation modules.
But then, what is the purpose of this module?
"""
nCols = 2 + len(outlier_algos) * len(filtering_algos)
nRows = len(section_list)
map_list = []
for section in section_list:
curr_compare_list = []
section_df = ls.get_section_points(section)
curr_compare_list.append(get_map(section_df))
curr_compare_list.append(get_map(ls.filter_points(section_df, None, None)))
for (oa, fa) in itertools.product(outlier_algos, filtering_algos):
curr_filtered_df = ls.filter_points(section_df, oa, fa)
print ("After filtering with %s, %s, size is %s" % (oa, fa, curr_filtered_df.shape))
if "activity" in section:
curr_compare_list.append(get_map(curr_filtered_df,
line_color = sel_color_list[section.activity.value],
popup = "%s" % (section.activity)))
else:
curr_compare_list.append(get_map(curr_filtered_df))
assert(len(curr_compare_list) == nCols)
map_list.append(curr_compare_list)
assert(len(map_list) == nRows)
return map_list
|
py | 7dfb52b70f5e0eae9b451f34531d6fc651a4c8ef | import numpy as np
from scipy.special import gammaln
from aux import addrandomtopic,dictionate
import time
data=np.array([[0,0],
[0,1],
[1,0],
[1,1],
[0,1],
[2,2],
[2,1],
[1,2],
[0,1],
[1,1],
[0,1],
[2,3],
[3,3],
[3,2],
[3,4],
[4,3],
[4,4],
[3,3],
[3,3],
[3,2],
[1,1],
[1,0],
[4,4],
[4,3],
[1,1],
[1,0],
[1,2],
[2,1],
[0,1],
[0,1],
[2,2],
[4,3],
[3,5],
[4,3],
[3,2],
[2,4],
[4,3],
[3,3],
[4,3],
[4,3],
[4,3],
[1,4]])
class Model:
def __init__(self,data,alpha,beta):
#** Preprocess the data
self.data,idx2vals,vals2idx,self.counts=dictionate(data) #self.data is dictionated data
self.V=len(idx2vals[0]) # Total number of observed variables in V
self.W=len(idx2vals[1]) # Total number of observed variables in W
self.alpha=alpha
self.beta=beta
# Global parameters
self.currV=0 # Current number of observed variables in V
self.currW=0 # Current number of observed variables in W
self.Vs=set() # Set of Vs
self.Ws=set() # Set of Ws
self.K=0 # Current number of existing K
self.nvk_=np.zeros((self.V,self.K))
self.n_kw=np.zeros((self.W,self.K))
self.n_k_=np.zeros(self.K)
self.sum_N=0
self.P_new=self.alpha
# Remove empty columns from structure with the exception of the first column
def removeEmptyCols(self,idx):
assert(np.sum(self.n_kw[:][:,idx]) == 0 and
np.sum(self.nvk_[:][:,idx]) == 0 and
self.n_k_[idx] == 0 or
(np.sum(self.n_kw[:][:,idx]) != 0 and
np.sum(self.nvk_[:][:,idx]) != 0 and
self.n_k_[idx] != 0))
if np.sum(self.n_kw[:][:,idx]) == 0:
self.n_kw=np.delete(self.n_kw,(idx),axis=1)
self.nvk_=np.delete(self.nvk_,(idx),axis=1)
self.n_k_=np.delete(self.n_k_,(idx))
self.sum_N=np.delete(self.sum_N,(idx))
self.data.T[-1][self.data.T[-1]>idx]-=1
self.K-=1
def update_topic(self,rowid,it):
x,y,currk=self.data[rowid]
#**1. Leave from Current Topic
self.n_kw[y][currk]-=1
self.nvk_[x][currk]-=1
self.n_k_[currk]-=1
# While observing the data construct Set of W and V
if it==0:
self.Ws.add(y)
self.Vs.add(x)
self.P_new=self.alpha/(len(self.Ws)*len(self.Vs))**2
self.sum_N=2*self.n_k_+len(self.Ws)*len(self.Vs)*self.beta
else:
self.sum_N[currk]-=2
W_=len(self.Ws)*1.0
V_=len(self.Vs)*1.0
if currk>0:
#currk needs to be updated as well
self.removeEmptyCols(currk)
Nxy=self.nvk_[x]/W_+self.n_kw[y]/V_+self.beta
log_Nvu=np.log(self.nvk_/W_+self.n_kw[y]/V_+self.beta+1)
log_Nxw=np.log(self.nvk_[x]/W_+self.n_kw/V_+self.beta+1)
#* Compute the terms used for calculating the posterior
A=gammaln(self.sum_N)-gammaln(self.sum_N+W_+V_)
B=gammaln(Nxy+2)-gammaln(Nxy)
C=np.sum(log_Nvu,0)+np.sum(log_Nxw,0)
log_p_z=A+B+C
p_z = np.exp(log_p_z-log_p_z.max()) # it may be optimized if p_z[0] is screwing up
p_z = np.multiply(self.n_k_,p_z)
p_z[0] = self.P_new
p_z = p_z / p_z.sum()
newk=np.random.multinomial(1, p_z / p_z.sum()).argmax()
if newk==0:
self.K+=1
self.n_kw=np.hstack((self.n_kw,np.zeros((self.W,1))))
self.nvk_=np.hstack((self.nvk_,np.zeros((self.V,1))))
self.n_k_=np.hstack((self.n_k_,0))
self.sum_N=np.hstack((self.sum_N,0))
#* Sits at Last Table
self.n_kw[y][-1]+=1
self.nvk_[x][-1]+=1
self.n_k_[-1]+=1
self.sum_N[-1]=2+len(self.Ws)*len(self.Vs)*self.beta
self.data[rowid][-1]=self.K
else:
#* Sits at New Table
self.n_kw[y][newk]+=1
self.nvk_[x][newk]+=1
self.n_k_[newk]+=1
self.data[rowid][-1]=newk
if it>0:
self.sum_N[newk]+=2
def inference(self,iterations_max):
#** Initialize the topics
self.data=np.hstack((self.data,np.zeros((np.shape(self.data)[0],1))))
self.data=np.asarray(np.asarray(self.data,dtype=np.float),dtype=np.int)
#** Initialize the book-keeping
self.nvk_=np.array([self.counts[0]]).T
self.n_kw=np.array([self.counts[1]]).T
self.n_k_=np.array([np.shape(self.data)[0]])
#** MAIN LOOP
for it in range(iterations_max):
for rowid in range(len(self.data)):
self.update_topic(rowid,it)
print "Iteration",it,"Number of topics",len(self.n_k_)-1
self.printTopics()
print "\nTopic Allocations"
print self.data
def loglikelihood(self):
return 0
def printTopics(self):
ntopics=len(self.n_k_)-1
topics=[]
for i in range(ntopics):
topics.append(np.zeros((self.V,self.W)))
for row in self.data:
x,y,t=row
topics[t-1][x][y]+=1 # given the fact that 0 is not a topic
for i,topic in enumerate(topics):
np.save("topic"+str(i),topic)
print "\nTopic "+str(i)+"------------------------ \n",topic
print "Row Topic : ",np.around(np.sum(topic,axis=0),decimals=1)
print "Column Topic: ",np.around(np.sum(topic,axis=1),decimals=1)
if __name__=="__main__":
alpha=0.01 #>0.00001- NIPS or > 0.01 - small toy
beta=1.0 #150.0 - NIPS or ~1.2- small toy
iterations=30
m= Model(data,alpha,beta)
m.inference(iterations)
print "Likelihood",m.loglikelihood()
|
py | 7dfb5325729542d75222eb66fcbf2893dacf537c | from django.db import models
# Create your models here.
class Espetaculos(models.Model):
titulo = models.CharField(max_length=255)
def __str__(self):
return self.titulo
|
py | 7dfb53425b6a823a8228730878003f88a186537e | """Support for OS X."""
from __future__ import print_function
from rpython.translator.platform import posix
import os
#
# Although Intel 32bit has been supported since Apple Mac OS X 10.4 (and PPC
# even longer), the @rpath handling used in Darwin._args_for_shared is only
# available since 10.5, so we use that as the minimum requirement. Bumped to 10.7
# to allow the use of thread-local variables via __thread in C.
# Bumped to 10.9 2021-11-22 to match CPython,
# see https://github.com/python/cpython/blob/42205ee51
#
# Keep in sync with MACOSX_DEPLOYMENT_TARGET, for pypy see
# lib_pypy/_sysconfigdata.py
#
DARWIN_VERSION_MIN = '-mmacosx-version-min=10.7'
class Darwin(posix.BasePosix):
name = "darwin"
standalone_only = ('-mdynamic-no-pic',)
shared_only = ()
link_flags = (DARWIN_VERSION_MIN,)
cflags = ('-O3',
'-fomit-frame-pointer',
DARWIN_VERSION_MIN,)
so_ext = 'dylib'
DEFAULT_CC = 'clang'
rpath_flags = ['-Wl,-rpath', '-Wl,@executable_path/']
def get_multiarch(self):
return 'darwin'
def get_rpath_flags(self, rel_libdirs):
# needed for cross compiling on ARM, needs fixing if relevant for darwin
if len(rel_libdirs) > 0:
print('in get_rpath_flags, rel_libdirs is not fixed up',rel_libdirs)
return self.rpath_flags
def _args_for_shared(self, args, **kwds):
if 'exe_name' in kwds:
target_basename = kwds['exe_name'].basename
else:
target_basename = '$(TARGET)'
# The default '$(TARGET)' is used inside a Makefile. Otherwise
# we get the basename of the executable we're trying to build.
return (list(self.shared_only)
+ ['-dynamiclib', '-install_name', '@rpath/' + target_basename,
'-undefined', 'dynamic_lookup', '-flat_namespace',
'-headerpad_max_install_names',
]
+ args)
def _include_dirs_for_libffi(self):
return self._pkg_config("libffi", "--cflags-only-I",
['/usr/include/ffi'],
check_result_dir=True)
def _library_dirs_for_libffi(self):
return self._pkg_config("libffi", "--libs-only-L",
['/usr/lib'],
check_result_dir=True)
def _include_dirs_for_openssl(self):
return self._pkg_config("openssl", "--cflags-only-I",
['/usr/include', '/usr/local/opt/openssl/include'],
check_result_dir=True)
def _library_dirs_for_openssl(self):
return self._pkg_config("openssl", "--libs-only-L",
['/usr/lib', '/usr/local/opt/openssl/lib'],
check_result_dir=True)
def _frameworks(self, frameworks):
args = []
for f in frameworks:
args.append('-framework')
args.append(f)
return args
def _link_args_from_eci(self, eci, standalone):
args = super(Darwin, self)._link_args_from_eci(eci, standalone)
frameworks = self._frameworks(eci.frameworks)
include_dirs = self._includedirs(eci.include_dirs)
return (args + frameworks + include_dirs)
def _exportsymbols_link_flags(self):
# XXX unsure if OS/X requires an option to the linker to tell
# "please export all RPY_EXPORTED symbols even in the case of
# making a binary and not a dynamically-linked library".
# It's not "-exported_symbols_list" but something close.
return []
def gen_makefile(self, cfiles, eci, exe_name=None, path=None,
shared=False, headers_to_precompile=[],
no_precompile_cfiles = [], profopt=False, config=None):
# ensure frameworks are passed in the Makefile
fs = self._frameworks(eci.frameworks)
extra_libs = self.extra_libs
if len(fs) > 0:
# concat (-framework, FrameworkName) pairs
self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2])))
mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path,
shared=shared,
headers_to_precompile=headers_to_precompile,
no_precompile_cfiles = no_precompile_cfiles,
profopt=profopt, config=config)
self.extra_libs = extra_libs
return mk
class Darwin_PowerPC(Darwin):#xxx fixme, mwp
name = "darwin_powerpc"
link_flags = Darwin.link_flags + ('-arch', 'ppc')
cflags = Darwin.cflags + ('-arch', 'ppc')
class Darwin_i386(Darwin):
name = "darwin_i386"
link_flags = Darwin.link_flags + ('-arch', 'i386')
cflags = Darwin.cflags + ('-arch', 'i386')
class Darwin_x86_64(Darwin):
name = "darwin_x86_64"
link_flags = Darwin.link_flags + ('-arch', 'x86_64')
cflags = Darwin.cflags + ('-arch', 'x86_64')
|
py | 7dfb5482a805b19690d9bf465f274bd99a1c8eb7 | """
Test that when being upgraded to version 4, a version 3 Inbox has the 'filter'
attribute set to a new L{xquotient.spam.Filter}, and that the other attributes
are copied over
"""
from axiom.test.historic.stubloader import StubbedTest
from xmantissa.webapp import PrivateApplication
from xquotient.spam import Filter
from xquotient.filter import Focus
from xquotient.inbox import Inbox
class InboxUpgradeTestCase(StubbedTest):
def test_focusAttributeSet(self):
"""
Test that L{xquotient.inbox.Inbox.focus} is set to the only Focus
powerup in the store.
"""
inbox = self.store.findUnique(Inbox)
focus = self.store.findUnique(Focus)
self.assertIdentical(focus, inbox.focus)
def test_focusInstalled(self):
"""
Test that the L{xquotient.filter.Focus} looks like it was properly
installed, by looking at its dependencies
"""
focus = self.store.findUnique(Focus)
self.failIf(
focus.messageSource is None,
'xquotient.filter.Focus was not installed properly')
def test_inboxAttributesCopied(self):
"""
Test that the attributes of the L{xquotient.inbox.Inbox} were copied
over from the previous version
"""
inbox = self.store.findUnique(Inbox)
self.assertEqual(inbox.uiComplexity, 2)
self.assertEqual(inbox.showMoreDetail, True)
self.assertIdentical(
inbox.privateApplication,
self.store.findUnique(PrivateApplication))
self.assertIdentical(
inbox.filter,
self.store.findUnique(Filter))
|
py | 7dfb552bdfe6732a9e2aff833d69a6ebf19ea945 | from .track_selected_count import TrackSelectedCount
from .track_played_count import TrackPlayedCount
from .genre_selected import GenreSelected
|
py | 7dfb553a126e71d32fea4f350266f65b81021a14 | from Agent import *
class Winner(Agent):
"""docstring for Wall."""
def __init__(self, environment, posX, posY, name):
super(Winner, self).__init__(environment, posX, posY, name)
self.color = "Yellow"
self.dead = False
def decide(self):
pass
def update(self):
pass
def isDead(self):
return self.dead |
py | 7dfb55e58dc6d2b3e6dfa7ed33421359c2c0c6c8 | import unittest
import rxbp
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.observerinfo import ObserverInfo
from rxbp.indexed.selectors.bases.numericalbase import NumericalBase
from rxbp.subscriber import Subscriber
from rxbp.testing.tobserver import TObserver
from rxbp.testing.tscheduler import TScheduler
class TestFromRange(unittest.TestCase):
def setUp(self) -> None:
self.scheduler = TScheduler()
def test_base(self):
subscription = rxbp.from_range(1, 4).unsafe_subscribe(Subscriber(
scheduler=self.scheduler, subscribe_scheduler=self.scheduler))
self.assertEqual(NumericalBase(3), subscription.info.base)
def test_use_case(self):
sink = TObserver(immediate_continue=0)
subscription = rxbp.from_range(1, 4).unsafe_subscribe(Subscriber(
scheduler=self.scheduler, subscribe_scheduler=self.scheduler))
subscription.observable.observe(ObserverInfo(observer=sink))
self.scheduler.advance_by(1)
self.assertEqual([1, 2, 3], sink.received)
self.assertTrue(sink.is_completed)
def test_from_list_batch_size_of_one(self):
sink = TObserver(immediate_continue=0)
subscription = rxbp.from_range(1, 4, batch_size=1).unsafe_subscribe(Subscriber(
scheduler=self.scheduler, subscribe_scheduler=self.scheduler))
subscription.observable.observe(ObserverInfo(observer=sink))
self.scheduler.advance_by(1)
self.assertEqual([1], sink.received)
self.assertFalse(sink.is_completed)
sink.ack.on_next(continue_ack)
self.scheduler.advance_by(1)
self.assertEqual([1, 2], sink.received)
def test_from_list_batch_size_of_two(self):
sink = TObserver(immediate_continue=0)
subscription = rxbp.from_range(1, 4, batch_size=2).unsafe_subscribe(Subscriber(
scheduler=self.scheduler, subscribe_scheduler=self.scheduler))
subscription.observable.observe(ObserverInfo(observer=sink))
self.scheduler.advance_by(1)
self.assertEqual([1, 2], sink.received)
sink.ack.on_next(continue_ack)
self.scheduler.advance_by(1)
self.assertEqual([1, 2, 3], sink.received)
self.assertTrue(sink.is_completed) |
py | 7dfb585287bf3d6a0655dd1e2492b1c98519584b | #!/usr/bin/env python3
# spin_async.py
# credits: Example by Luciano Ramalho inspired by
# Michele Simionato's multiprocessing example in the python-list:
# https://mail.python.org/pipermail/python-list/2009-February/538048.html
# BEGIN SPINNER_ASYNCIO
import asyncio
import itertools
import time
import spin_thread
async def spin(msg): # <1>
for char in itertools.cycle('⠇⠋⠙⠸⠴⠦'):
status = f'\r{char} {msg}'
print(status, flush=True, end='')
try:
await asyncio.sleep(.1) # <2>
except asyncio.CancelledError: # <3>
break
blanks = ' ' * len(status)
print(f'\r{blanks}\r', end='')
async def slow_function(): # <4>
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(None,
spin_thread.fetch_by_size, 7_000_000)
return result
async def supervisor(): # <6>
spinner = asyncio.create_task(spin('thinking!')) # <7>
print('spinner object:', spinner) # <8>
result = await slow_function() # <9>
spinner.cancel() # <10>
return result
def main():
t0 = time.perf_counter()
size, name = asyncio.run(supervisor()) # <11>
dt = time.perf_counter() - t0
print(f'{size:_d} bytes downloaded')
print('Name:', name)
print(f'Elapsed time: {dt:0.3}s')
if __name__ == '__main__':
main()
# END SPINNER_ASYNCIO
|
py | 7dfb59b66bd6bc3d4e07dc9f6879ef6397de055b | from bibliopixel.animation.circle import Circle
from bibliopixel.colors import COLORS
class ArcRotate(Circle):
COLOR_DEFAULTS = ('colors', [COLORS.Red, COLORS.Green, COLORS.Blue]),
def __init__(self, layout, arc=180, outerRing=-1, outterRing=None, **kwds):
super().__init__(layout, **kwds)
if outterRing is not None:
# Legacy misspelling
outerRing = outterRing
if outerRing < 0 or outerRing > self.layout.lastRing:
outerRing = self.layout.lastRing
self.outerRing = outerRing
self.arcCount = len(self.palette)
self.arc = arc / 2
def pre_run(self):
self._step = 0
def step(self, amt=1):
self.layout.all_off()
ci = 0
for r in range(self.outerRing, self.outerRing - self.arcCount, -1):
c = self.palette(ci)
ci += 1
self.layout.fillRing(r, c, startAngle=self._step - self.arc, endAngle=self._step + self.arc)
self._step += amt
self._step %= 360
|
py | 7dfb59e36e93cab43bba06bb4eda9650c1a91b4f | # sqlalchemy/pool/dbapi_proxy.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""DBAPI proxy utility.
Provides transparent connection pooling on top of a Python DBAPI.
This is legacy SQLAlchemy functionality that is not typically used
today.
"""
from .impl import QueuePool
from .. import util
from ..util import threading
proxies = {}
@util.deprecated(
"1.3",
"The :func:`.pool.manage` function is deprecated, and will be "
"removed in a future release.",
)
def manage(module, **params):
r"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \**params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
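# Hedged usage sketch (added; sqlite3 and the pool sizes are illustrative only):
#
#     import sqlite3
#     from sqlalchemy.pool import manage
#
#     pooled_sqlite3 = manage(sqlite3, pool_size=5, max_overflow=10)
#     conn = pooled_sqlite3.connect('app.db')   # checked out of a QueuePool
#     conn.close()                              # returns the connection to the pool
#
# Each distinct set of connect() arguments maps to its own pool, keyed by
# _DBProxy._serialize() below.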
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
with self._create_pool_mutex:
if key not in self.pools:
kw.pop("sa_pool_key", None)
pool = self.poolclass(
lambda: self.module.connect(*args, **kw), **self.kw
)
self.pools[key] = pool
return pool
else:
return self.pools[key]
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw["sa_pool_key"]
return tuple(list(args) + [(k, kw[k]) for k in sorted(kw)])
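
# Usage sketch: ``manage`` above wraps a DB-API module so that connect() calls
# are transparently pooled. For example, with the stdlib sqlite3 module
# (keyword arguments beyond the module are passed through to QueuePool):
#
#     import sqlite3
#     proxied = manage(sqlite3, pool_size=5)
#     conn = proxied.connect("app.db")    # checked out from a pool keyed on the args
#     conn.close()                        # returns the connection to the pool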
|
py | 7dfb5a02979b6ae64a050e287dc8c99f780839f1 | from archives.models import Document
from django import forms
from django.forms import SelectDateWidget
class DocumentForm(forms.ModelForm):
coverage_start = forms.DateField(label="Start Date", widget=SelectDateWidget(years=list(range(
1879, 1991))), help_text="Enter the start date for material covered in this document.")
coverage_end = forms.DateField(label="End Date", widget=SelectDateWidget(years=list(range(
1879, 1991))), help_text="For documents covering more than one day indicate the end date for material covered.")
class Meta:
model = Document
fields = '__all__'
|
py | 7dfb5a21e48dba73720fc17daa34384ff1390ba7 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import re
import redis
from frappe.utils import cint, strip_html_tags
from frappe.model.base_document import get_controller
from frappe.model.db_schema import varchar_len
from six import text_type
def setup_global_search_table():
"""
	Creates __global_search table
:return:
"""
	if '__global_search' not in frappe.db.get_tables():
frappe.db.sql('''create table __global_search(
doctype varchar(100),
name varchar({varchar_len}),
title varchar({varchar_len}),
content text,
fulltext(content),
route varchar({varchar_len}),
published int(1) not null default 0,
unique `doctype_name` (doctype, name))
COLLATE=utf8mb4_unicode_ci
ENGINE=MyISAM
CHARACTER SET=utf8mb4'''.format(varchar_len=varchar_len))
def reset():
"""
Deletes all data in __global_search
:return:
"""
frappe.db.sql('delete from __global_search')
def get_doctypes_with_global_search(with_child_tables=True):
"""
Return doctypes with global search fields
:param with_child_tables:
:return:
"""
def _get():
global_search_doctypes = []
filters = {}
if not with_child_tables:
filters = {"istable": ["!=", 1], "issingle": ["!=", 1]}
for d in frappe.get_all('DocType', fields=['name', 'module'], filters=filters):
meta = frappe.get_meta(d.name)
if len(meta.get_global_search_fields()) > 0:
global_search_doctypes.append(d)
installed_apps = frappe.get_installed_apps()
module_app = frappe.local.module_app
doctypes = [
d.name for d in global_search_doctypes
if module_app.get(frappe.scrub(d.module))
and module_app[frappe.scrub(d.module)] in installed_apps
]
return doctypes
return frappe.cache().get_value('doctypes_with_global_search', _get)
def rebuild_for_doctype(doctype):
"""
Rebuild entries of doctype's documents in __global_search on change of
searchable fields
:param doctype: Doctype
"""
def _get_filters():
filters = frappe._dict({ "docstatus": ["!=", 2] })
if meta.has_field("enabled"):
filters.enabled = 1
if meta.has_field("disabled"):
filters.disabled = 0
return filters
meta = frappe.get_meta(doctype)
if cint(meta.istable) == 1:
parent_doctypes = frappe.get_all("DocField", fields="parent", filters={
"fieldtype": "Table",
"options": doctype
})
for p in parent_doctypes:
rebuild_for_doctype(p.parent)
return
# Delete records
delete_global_search_records_for_doctype(doctype)
parent_search_fields = meta.get_global_search_fields()
fieldnames = get_selected_fields(meta, parent_search_fields)
# Get all records from parent doctype table
all_records = frappe.get_all(doctype, fields=fieldnames, filters=_get_filters())
# Children data
all_children, child_search_fields = get_children_data(doctype, meta)
all_contents = []
for doc in all_records:
content = []
for field in parent_search_fields:
value = doc.get(field.fieldname)
if value:
content.append(get_formatted_value(value, field))
# get children data
for child_doctype, records in all_children.get(doc.name, {}).items():
for field in child_search_fields.get(child_doctype):
for r in records:
if r.get(field.fieldname):
content.append(get_formatted_value(r.get(field.fieldname), field))
if content:
# if doctype published in website, push title, route etc.
published = 0
title, route = "", ""
try:
if hasattr(get_controller(doctype), "is_website_published") and meta.allow_guest_to_view:
d = frappe.get_doc(doctype, doc.name)
published = 1 if d.is_website_published() else 0
title = d.get_title()
route = d.get("route")
except ImportError:
# some doctypes has been deleted via future patch, hence controller does not exists
pass
all_contents.append({
"doctype": frappe.db.escape(doctype),
"name": frappe.db.escape(doc.name),
"content": frappe.db.escape(' ||| '.join(content or '')),
"published": published,
"title": frappe.db.escape(title or '')[:int(varchar_len)],
"route": frappe.db.escape(route or '')[:int(varchar_len)]
})
if all_contents:
insert_values_for_multiple_docs(all_contents)
def delete_global_search_records_for_doctype(doctype):
frappe.db.sql('''
delete
from __global_search
where
doctype = %s''', doctype, as_dict=True)
def get_selected_fields(meta, global_search_fields):
fieldnames = [df.fieldname for df in global_search_fields]
if meta.istable==1:
fieldnames.append("parent")
elif "name" not in fieldnames:
fieldnames.append("name")
if meta.has_field("is_website_published"):
fieldnames.append("is_website_published")
return fieldnames
def get_children_data(doctype, meta):
"""
Get all records from all the child tables of a doctype
all_children = {
"parent1": {
"child_doctype1": [
{
"field1": val1,
"field2": val2
}
]
}
}
"""
all_children = frappe._dict()
child_search_fields = frappe._dict()
for child in meta.get_table_fields():
child_meta = frappe.get_meta(child.options)
search_fields = child_meta.get_global_search_fields()
if search_fields:
child_search_fields.setdefault(child.options, search_fields)
child_fieldnames = get_selected_fields(child_meta, search_fields)
child_records = frappe.get_all(child.options, fields=child_fieldnames, filters={
"docstatus": ["!=", 1],
"parenttype": doctype
})
for record in child_records:
all_children.setdefault(record.parent, frappe._dict())\
.setdefault(child.options, []).append(record)
return all_children, child_search_fields
def insert_values_for_multiple_docs(all_contents):
values = []
for content in all_contents:
values.append("( '{doctype}', '{name}', '{content}', '{published}', '{title}', '{route}')"
.format(**content))
batch_size = 50000
for i in range(0, len(values), batch_size):
batch_values = values[i:i + batch_size]
# ignoring duplicate keys for doctype_name
frappe.db.sql('''
insert ignore into __global_search
(doctype, name, content, published, title, route)
values
{0}
'''.format(", ".join(batch_values)))
def update_global_search(doc):
"""
Add values marked with `in_global_search` to
`frappe.flags.update_global_search` from given doc
:param doc: Document to be added to global search
"""
if doc.docstatus > 1 or (doc.meta.has_field("enabled") and not doc.get("enabled")) \
or doc.get("disabled"):
return
	if frappe.flags.update_global_search is None:
frappe.flags.update_global_search = []
content = []
for field in doc.meta.get_global_search_fields():
if doc.get(field.fieldname) and field.fieldtype != "Table":
content.append(get_formatted_value(doc.get(field.fieldname), field))
# Get children
for child in doc.meta.get_table_fields():
for d in doc.get(child.fieldname):
if d.parent == doc.name:
for field in d.meta.get_global_search_fields():
if d.get(field.fieldname):
content.append(get_formatted_value(d.get(field.fieldname), field))
if content:
published = 0
if hasattr(doc, 'is_website_published') and doc.meta.allow_guest_to_view:
published = 1 if doc.is_website_published() else 0
title = (doc.get_title() or '')[:int(varchar_len)]
route = doc.get('route') if doc else ''
frappe.flags.update_global_search.append(
dict(
doctype=doc.doctype,
name=doc.name,
content=' ||| '.join(content or ''),
published=published,
title=title,
route=route
)
)
enqueue_global_search()
def enqueue_global_search():
if frappe.flags.update_global_search:
try:
frappe.enqueue('frappe.utils.global_search.sync_global_search',
now=frappe.flags.in_test or frappe.flags.in_install or frappe.flags.in_migrate,
flags=frappe.flags.update_global_search, enqueue_after_commit=True)
except redis.exceptions.ConnectionError:
sync_global_search()
frappe.flags.update_global_search = []
def get_formatted_value(value, field):
"""
Prepare field from raw data
:param value:
:param field:
:return:
"""
from six.moves.html_parser import HTMLParser
if getattr(field, 'fieldtype', None) in ["Text", "Text Editor"]:
h = HTMLParser()
value = h.unescape(value)
		value = (re.subn(r'(?s)<[\s]*(script|style).*?</\1>', '', text_type(value))[0])
value = ' '.join(value.split())
return field.label + " : " + strip_html_tags(text_type(value))
def sync_global_search(flags=None):
"""
Add values from `flags` (frappe.flags.update_global_search) to __global_search.
This is called internally at the end of the request.
:param flags:
:return:
"""
if not flags:
flags = frappe.flags.update_global_search
# Can pass flags manually as frappe.flags.update_global_search isn't reliable at a later time,
# when syncing is enqueued
for value in flags:
frappe.db.sql('''
insert into __global_search
(doctype, name, content, published, title, route)
values
(%(doctype)s, %(name)s, %(content)s, %(published)s, %(title)s, %(route)s)
on duplicate key update
content = %(content)s''', value)
frappe.flags.update_global_search = []
def delete_for_document(doc):
"""
Delete the __global_search entry of a document that has
been deleted
:param doc: Deleted document
"""
frappe.db.sql('''
delete
from __global_search
where
doctype = %s and
name = %s''', (doc.doctype, doc.name), as_dict=True)
@frappe.whitelist()
def search(text, start=0, limit=20, doctype=""):
"""
Search for given text in __global_search
:param text: phrase to be searched
:param start: start results at, default 0
:param limit: number of results to return, default 20
:return: Array of result objects
"""
text = "+" + text + "*"
if not doctype:
results = frappe.db.sql('''
select
doctype, name, content
from
__global_search
where
match(content) against (%s IN BOOLEAN MODE)
			limit {start}, {limit}'''.format(start=start, limit=limit), text, as_dict=True)
else:
results = frappe.db.sql('''
select
doctype, name, content
from
__global_search
where
doctype = %s AND
match(content) against (%s IN BOOLEAN MODE)
limit {start}, {limit}'''.format(start=start, limit=limit), (doctype, text), as_dict=True)
for r in results:
try:
if frappe.get_meta(r.doctype).image_field:
r.image = frappe.db.get_value(r.doctype, r.name, frappe.get_meta(r.doctype).image_field)
except Exception:
frappe.clear_messages()
return results
@frappe.whitelist(allow_guest=True)
def web_search(text, start=0, limit=20):
"""
Search for given text in __global_search where published = 1
:param text: phrase to be searched
:param start: start results at, default 0
:param limit: number of results to return, default 20
:return: Array of result objects
"""
text = "+" + text + "*"
results = frappe.db.sql('''
select
doctype, name, content, title, route
from
__global_search
where
published = 1 and
match(content) against (%s IN BOOLEAN MODE)
limit {start}, {limit}'''.format(start=start, limit=limit),
text, as_dict=True)
return results
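
# Usage sketch: these helpers assume an initialized Frappe site context
# (frappe.init / frappe.connect); "ToDo" below is only an illustrative doctype.
#
#     setup_global_search_table()        # ensure __global_search exists
#     rebuild_for_doctype("ToDo")        # (re)index all documents of a doctype
#     results = search("milk", limit=5)  # full-text lookup over indexed content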
|
py | 7dfb5b10edc1414647ce6fa26a56bb5fd89702ae | #
# Author: Tiberiu Boros
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dynet_config
import optparse
import sys
import numpy as np
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('--cleanup', action='store_true', dest='cleanup',
help='Cleanup temporary training files and start from fresh')
parser.add_option('--phase', action='store', dest='phase',
choices=['1', '2', '3', '4', '5'],
help='select phase: 1 - prepare corpus; 2 - train vocoder; 3 - train encoder; 4 - end-to-end; 5 - test vocoder')
parser.add_option("--batch-size", action='store', dest='batch_size', default='1000', type='int',
help='number of samples in a single batch (default=1000)')
parser.add_option("--set-mem", action='store', dest='memory', default='2048', type='int',
help='preallocate memory for batch training (default 2048)')
parser.add_option("--autobatch", action='store_true', dest='autobatch',
help='turn on/off dynet autobatching')
parser.add_option("--resume", action='store_true', dest='resume',
help='resume from last checkpoint')
parser.add_option("--no-guided-attention", action='store_true', dest='no_guided_attention',
help='disable guided attention')
parser.add_option("--no-bounds", action='store_true', dest='no_bounds',
help='disable fixed synthesis length')
parser.add_option("--use-gpu", action='store_true', dest='gpu',
help='turn on/off GPU support')
parser.add_option('--train-folder', action='store', dest='train_folder',
help='Location of the training files')
parser.add_option('--dev-folder', action='store', dest='dev_folder',
help='Location of the development files')
parser.add_option('--target-sample-rate', action='store', dest='target_sample_rate',
help='Resample input files at this rate (default=16000)', type='int', default=16000)
parser.add_option('--mgc-order', action='store', dest='mgc_order', type='int',
help='Order of MGC parameters (default=60)', default=60)
(params, _) = parser.parse_args(sys.argv)
memory = int(params.memory)
if params.autobatch:
autobatch = True
else:
autobatch = False
dynet_config.set(mem=memory, random_seed=9, autobatch=autobatch)
if params.gpu:
dynet_config.set_gpu()
def array2file(a, filename):
np.save(filename, a)
def file2array(filename):
a = np.load(filename)
return a
def render_spectrogram(mgc, output_file):
bitmap = np.zeros((mgc.shape[1], mgc.shape[0], 3), dtype=np.uint8)
mgc_min = mgc.min()
mgc_max = mgc.max()
for x in xrange(mgc.shape[0]):
for y in xrange(mgc.shape[1]):
val = (mgc[x, y] - mgc_min) / (mgc_max - mgc_min)
color = val * 255
bitmap[mgc.shape[1] - y - 1, x] = [color, color, color]
import scipy.misc as smp
img = smp.toimage(bitmap)
img.save(output_file)
def create_lab_file(txt_file, lab_file):
fin = open(txt_file, 'r')
fout = open(lab_file, 'w')
line = fin.readline().decode('utf-8').strip().replace('\t', ' ')
while True:
nl = line.replace(' ', ' ')
if nl == line:
break
line = nl
fout.write('START\n')
for char in line:
l_char = char.lower()
style = 'CASE:lower'
if l_char == l_char.upper():
style = 'CASE:symb'
elif l_char != char:
style = 'CASE:upper'
if len(txt_file.replace('\\', '/').split('/')[-1].split('_')) != 1:
speaker = 'SPEAKER:' + txt_file.replace('\\', '/').split('_')[0].split('/')[-1]
else:
speaker = 'SPEAKER:none'
fout.write(l_char.encode('utf-8') + '\t' + speaker + '\t' + style + '\n')
fout.write('STOP\n')
fin.close()
fout.close()
return ""
def phase_1_prepare_corpus(params):
from os import listdir
from os.path import isfile, join
from os.path import exists
train_files_tmp = [f for f in listdir(params.train_folder) if isfile(join(params.train_folder, f))]
dev_files_tmp = [f for f in listdir(params.dev_folder) if isfile(join(params.dev_folder, f))]
sys.stdout.write("Scanning training files...")
sys.stdout.flush()
final_list = []
for file in train_files_tmp:
base_name = file[:-4]
lab_name = base_name + '.txt'
wav_name = base_name + '.wav'
if exists(join(params.train_folder, lab_name)) and exists(join(params.train_folder, wav_name)):
if base_name not in final_list:
final_list.append(base_name)
train_files = final_list
sys.stdout.write(" found " + str(len(train_files)) + " valid training files\n")
sys.stdout.write("Scanning development files...")
sys.stdout.flush()
final_list = []
for file in dev_files_tmp:
base_name = file[:-4]
lab_name = base_name + '.txt'
wav_name = base_name + '.wav'
if exists(join(params.dev_folder, lab_name)) and exists(join(params.dev_folder, wav_name)):
if base_name not in final_list:
final_list.append(base_name)
dev_files = final_list
sys.stdout.write(" found " + str(len(dev_files)) + " valid development files\n")
from io_modules.dataset import DatasetIO
from io_modules.vocoder import MelVocoder
from shutil import copyfile
import pysptk
dio = DatasetIO()
vocoder = MelVocoder()
base_folder = params.train_folder
for index in xrange(len(train_files)):
sys.stdout.write("\r\tprocessing file " + str(index + 1) + "/" + str(len(train_files)))
sys.stdout.flush()
base_name = train_files[index]
txt_name = base_name + '.txt'
wav_name = base_name + '.wav'
spc_name = base_name + '.png'
lab_name = base_name + '.lab'
# LAB - copy or create
if exists(join(base_folder, lab_name)):
copyfile(join(base_folder, lab_name), join('data/processed/train', lab_name))
else:
create_lab_file(join(base_folder, txt_name), join('data/processed/train', lab_name))
# TXT
copyfile(join(base_folder, txt_name), join('data/processed/train', txt_name))
# WAVE
data, sample_rate = dio.read_wave(join(base_folder, wav_name), sample_rate=params.target_sample_rate)
mgc = vocoder.melspectrogram(data, sample_rate=params.target_sample_rate, num_mels=params.mgc_order)
# SPECT
render_spectrogram(mgc, join('data/processed/train', spc_name))
dio.write_wave(join('data/processed/train', base_name + '.orig.wav'), data, sample_rate)
array2file(mgc, join('data/processed/train', base_name + '.mgc'))
sys.stdout.write('\n')
base_folder = params.dev_folder
for index in xrange(len(dev_files)):
sys.stdout.write("\r\tprocessing file " + str(index + 1) + "/" + str(len(dev_files)))
sys.stdout.flush()
base_name = dev_files[index]
txt_name = base_name + '.txt'
wav_name = base_name + '.wav'
spc_name = base_name + '.png'
lab_name = base_name + '.lab'
# LAB - copy or create
if exists(join(base_folder, lab_name)):
copyfile(join(base_folder, lab_name), join('data/processed/dev', lab_name))
else:
create_lab_file(join(base_folder, txt_name), join('data/processed/dev', lab_name))
# TXT
copyfile(join(base_folder, txt_name), join('data/processed/dev/', txt_name))
# WAVE
data, sample_rate = dio.read_wave(join(base_folder, wav_name), sample_rate=params.target_sample_rate)
mgc = vocoder.melspectrogram(data, sample_rate=params.target_sample_rate, num_mels=params.mgc_order)
# SPECT
render_spectrogram(mgc, join('data/processed/dev', spc_name))
dio.write_wave(join('data/processed/dev', base_name + '.orig.wav'), data, sample_rate)
array2file(mgc, join('data/processed/dev', base_name + '.mgc'))
sys.stdout.write('\n')
def phase_2_train_vocoder(params):
from io_modules.dataset import Dataset
from models.vocoder import Vocoder
from trainers.vocoder import Trainer
vocoder = Vocoder(params)
if params.resume:
sys.stdout.write('Resuming from previous checkpoint\n')
vocoder.load('data/models/rnn_vocoder')
trainset = Dataset("data/processed/train")
devset = Dataset("data/processed/dev")
sys.stdout.write('Found ' + str(len(trainset.files)) + ' training files and ' + str(
len(devset.files)) + ' development files\n')
trainer = Trainer(vocoder, trainset, devset)
trainer.start_training(20, params.batch_size, params.target_sample_rate)
def phase_3_train_encoder(params):
from io_modules.dataset import Dataset
from io_modules.dataset import Encodings
from models.encoder import Encoder
from trainers.encoder import Trainer
trainset = Dataset("data/processed/train")
devset = Dataset("data/processed/dev")
sys.stdout.write('Found ' + str(len(trainset.files)) + ' training files and ' + str(
len(devset.files)) + ' development files\n')
encodings = Encodings()
count = 0
if not params.resume:
for train_file in trainset.files:
count += 1
if count % 100 == 0:
sys.stdout.write('\r' + str(count) + '/' + str(len(trainset.files)) + ' processed files')
sys.stdout.flush()
from io_modules.dataset import DatasetIO
dio = DatasetIO()
lab_list = dio.read_lab(train_file + ".lab")
for entry in lab_list:
encodings.update(entry)
sys.stdout.write('\r' + str(count) + '/' + str(len(trainset.files)) + ' processed files\n')
sys.stdout.write('Found ' + str(len(encodings.char2int)) + ' unique symbols, ' + str(
len(encodings.context2int)) + ' unique features and ' + str(
len(encodings.speaker2int)) + ' unique speakers\n')
encodings.store('data/models/encoder.encodings')
else:
encodings.load('data/models/encoder.encodings')
if params.resume:
        runtime = True  # avoid orthonormal initialization
else:
runtime = False
encoder = Encoder(params, encodings, runtime=runtime)
if params.resume:
sys.stdout.write('Resuming from previous checkpoint\n')
encoder.load('data/models/rnn_encoder')
if params.no_guided_attention:
sys.stdout.write('Disabling guided attention\n')
if params.no_bounds:
sys.stdout.write('Using internal stopping condition for synthesis\n')
trainer = Trainer(encoder, trainset, devset)
trainer.start_training(10, 1000, params)
def phase_5_test_vocoder(params):
from io_modules.dataset import Dataset
from models.vocoder import Vocoder
from trainers.vocoder import Trainer
vocoder = Vocoder(params, runtime=True)
vocoder.load('data/models/rnn')
trainset = Dataset("data/processed/train")
devset = Dataset("data/processed/dev")
sys.stdout.write('Found ' + str(len(trainset.files)) + ' training files and ' + str(
len(devset.files)) + ' development files\n')
trainer = Trainer(vocoder, trainset, devset)
trainer.synth_devset(params.batch_size, target_sample_rate=params.target_sample_rate, sample=True,
temperature=0.8)
if params.phase and params.phase == '1':
phase_1_prepare_corpus(params)
if params.phase and params.phase == '2':
phase_2_train_vocoder(params)
if params.phase and params.phase == '3':
phase_3_train_encoder(params)
if params.phase and params.phase == '4':
print ("Not yet implemented. Still wondering if this is really required")
if params.phase and params.phase == '5':
phase_5_test_vocoder(params)
|
py | 7dfb5b292e899ee86c9073b3697e0708367d4898 | """A check to ensure author initials are separated by a space."""
import re
import bibcheck.checker
class Issue(bibcheck.checker.Issue):
"""Represents an issue with DOIs."""
@property
def message(self):
"""Get the message for this issue."""
return "DOI entries should just contain the DOI."
class DoiChecker(bibcheck.checker.Checker): # pylint: disable=too-few-public-methods
"""
Check for issues with DOI entries.
1. DOI entries should not be a URL: doi={10.1000/foo} instead of
doi={https://doi.org/10.1000/foo}.
"""
def __init__(self):
self.__doi_url_regex = re.compile(r'doi\s*=\s*[{"]\s*https://doi.org/.*["}]')
def check(self, line: bibcheck.checker.Line):
"""
Check if a line contains any DOI issues.
:param bibcheck.checker.Line line: The line of text to check, along with file context
information.
:return: A list of issues if the given ``line`` contains DOI problems.
:rtype: list of bibcheck.checks.doi.Issue objects.
"""
if self.__doi_url_regex.search(line.text):
            return [Issue(line.file_path, line.line_number)]
return []
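
# Usage sketch: ``bibcheck.checker.Line`` is assumed to expose the ``text``,
# ``file_path`` and ``line_number`` attributes used above; the constructor
# call shown here is hypothetical.
#
#     checker = DoiChecker()
#     line = bibcheck.checker.Line(
#         text='doi = {https://doi.org/10.1000/foo}',
#         file_path="refs.bib",
#         line_number=12,
#     )
#     issues = checker.check(line)    # -> [Issue(...)] when a DOI URL is found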
|
py | 7dfb5ca95bbfca6c2d029cddb0e78c8682900daa | # http://www.codewars.com/kata/56a946cd7bd95ccab2000055/
def lowercase_count(strng):
total = 0
for letter in strng:
if ord(letter) >= 97 and ord(letter) <= 122:
total += 1
return total
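# Example: lowercase_count("abcABC123") returns 3, since only characters whose
# ord() falls in the range 97-122 ('a' through 'z') are counted.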
|
py | 7dfb5cbf60ef49544c4ea717f7c5c4fa8b3c4a5b | from django.shortcuts import render, redirect
#from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from .forms import RegisterForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def register(request):
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Welcome {username}, your account has been created !')
return redirect('login')
else:
form = RegisterForm()
return render(request, 'users/register.html', {'form':form})
@login_required
def profilpage(request):
return render(request, 'users/profile.html') |
py | 7dfb5d162be81c00abe9d49c275953cfe5ea4a00 | """
The registry for samplers designed to partition the dataset across the clients.
Having a registry of all available classes is convenient for retrieving an instance based
on a configuration at run-time.
"""
import logging
from collections import OrderedDict
from plato.config import Config
if hasattr(Config().trainer, 'use_mindspore'):
from plato.samplers.mindspore import (
iid as iid_mindspore,
dirichlet as dirichlet_mindspore,
)
registered_samplers = OrderedDict([
('iid', iid_mindspore.Sampler),
('noniid', dirichlet_mindspore.Sampler),
])
elif hasattr(Config().trainer, 'use_tensorflow'):
from plato.samplers.tensorflow import base
registered_samplers = OrderedDict([
('iid', base.Sampler),
('noniid', base.Sampler),
('mixed', base.Sampler),
])
else:
try:
from plato.samplers import (iid, dirichlet, mixed, orthogonal,
all_inclusive)
registered_samplers = OrderedDict([
('iid', iid.Sampler),
('noniid', dirichlet.Sampler),
('mixed', mixed.Sampler),
('orthogonal', orthogonal.Sampler),
('all_inclusive', all_inclusive.Sampler),
])
except:
from plato.samplers.nnrt import base
registered_samplers = OrderedDict([
('iid', base.Sampler),
('noniid', base.Sampler),
('mixed', base.Sampler),
])
def get(datasource, client_id):
"""Get an instance of the sampler."""
if hasattr(Config().data, 'sampler'):
sampler_type = Config().data.sampler
else:
sampler_type = 'iid'
logging.info("[Client #%d] Sampler: %s", client_id, sampler_type)
if sampler_type in registered_samplers:
registered_sampler = registered_samplers[sampler_type](datasource,
client_id)
else:
raise ValueError('No such sampler: {}'.format(sampler_type))
return registered_sampler
|
py | 7dfb5fb9df48579583c84ee90f3df3fc057e3f76 | # Title: Dijkstra's Algorithm for finding single source shortest path from scratch
# Author: Shubham Malik
# References: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
import math
import sys
# For storing the vertex set to retreive node with the lowest distance
class PriorityQueue:
# Based on Min Heap
def __init__(self):
self.cur_size = 0
self.array = []
self.pos = {} # To store the pos of node in array
def isEmpty(self):
return self.cur_size == 0
    def min_heapify(self, idx):
        lc = self.left(idx)
        rc = self.right(idx)
        if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]:
            smallest = lc
        else:
            smallest = idx
        if rc < self.cur_size and self.array[rc][0] < self.array[smallest][0]:
            smallest = rc
        if smallest != idx:
            self.swap(idx, smallest)
            self.min_heapify(smallest)
def insert(self, tup):
# Inserts a node into the Priority Queue
self.pos[tup[1]] = self.cur_size
self.cur_size += 1
self.array.append((sys.maxsize, tup[1]))
self.decrease_key((sys.maxsize, tup[1]), tup[0])
    def extract_min(self):
        # Removes and returns the min element at top of priority queue
        min_node = self.array[0][1]
        last = self.array.pop()
        self.cur_size -= 1
        if self.cur_size > 0:
            # Move the last element to the root and restore the heap property
            self.array[0] = last
            self.pos[last[1]] = 0
            self.min_heapify(0)
        del self.pos[min_node]
        return min_node
def left(self, i):
# returns the index of left child
return 2 * i + 1
def right(self, i):
# returns the index of right child
return 2 * i + 2
    def par(self, i):
        # returns the index of parent (0-based heap, children at 2i+1 and 2i+2)
        return (i - 1) // 2
def swap(self, i, j):
# swaps array elements at indices i and j
# update the pos{}
self.pos[self.array[i][1]] = j
self.pos[self.array[j][1]] = i
temp = self.array[i]
self.array[i] = self.array[j]
self.array[j] = temp
def decrease_key(self, tup, new_d):
idx = self.pos[tup[1]]
        # assuming the new_d is at most old_d
self.array[idx] = (new_d, tup[1])
while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]:
self.swap(idx, self.par(idx))
idx = self.par(idx)
class Graph:
def __init__(self, num):
self.adjList = {} # To store graph: u -> (v,w)
self.num_nodes = num # Number of nodes in graph
# To store the distance from source vertex
self.dist = [0] * self.num_nodes
self.par = [-1] * self.num_nodes # To store the path
def add_edge(self, u, v, w):
# Edge going from node u to v and v to u with weight w
# u (w)-> v, v (w) -> u
# Check if u already in graph
if u in self.adjList.keys():
self.adjList[u].append((v, w))
else:
self.adjList[u] = [(v, w)]
# Assuming undirected graph
if v in self.adjList.keys():
self.adjList[v].append((u, w))
else:
self.adjList[v] = [(u, w)]
def show_graph(self):
# u -> v(w)
for u in self.adjList:
print(
u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]),
)
def dijkstra(self, src):
# Flush old junk values in par[]
self.par = [-1] * self.num_nodes
# src is the source node
self.dist[src] = 0
Q = PriorityQueue()
Q.insert((0, src)) # (dist from src, node)
for u in self.adjList.keys():
if u != src:
self.dist[u] = sys.maxsize # Infinity
self.par[u] = -1
while not Q.isEmpty():
u = Q.extract_min() # Returns node with the min dist from source
# Update the distance of all the neighbours of u and
# if their prev dist was INFINITY then push them in Q
for v, w in self.adjList[u]:
new_dist = self.dist[u] + w
if self.dist[v] > new_dist:
if self.dist[v] == sys.maxsize:
Q.insert((new_dist, v))
else:
Q.decrease_key((self.dist[v], v), new_dist)
self.dist[v] = new_dist
self.par[v] = u
# Show the shortest distances from src
self.show_distances(src)
def show_distances(self, src):
print(f"Distance from node: {src}")
for u in range(self.num_nodes):
print(f"Node {u} has distance: {self.dist[u]}")
def show_path(self, src, dest):
# To show the shortest path from src to dest
# WARNING: Use it *after* calling dijkstra
path = []
cost = 0
temp = dest
# Backtracking from dest to src
while self.par[temp] != -1:
path.append(temp)
if temp != src:
for v, w in self.adjList[temp]:
if v == self.par[temp]:
cost += w
break
temp = self.par[temp]
path.append(src)
path.reverse()
print(f"----Path to reach {dest} from {src}----")
for u in path:
print(f"{u}", end=" ")
if u != dest:
print("-> ", end="")
print("\nTotal cost of path: ", cost)
if __name__ == "__main__":
graph = Graph(9)
graph.add_edge(0, 1, 4)
graph.add_edge(0, 7, 8)
graph.add_edge(1, 2, 8)
graph.add_edge(1, 7, 11)
graph.add_edge(2, 3, 7)
graph.add_edge(2, 8, 2)
graph.add_edge(2, 5, 4)
graph.add_edge(3, 4, 9)
graph.add_edge(3, 5, 14)
graph.add_edge(4, 5, 10)
graph.add_edge(5, 6, 2)
graph.add_edge(6, 7, 1)
graph.add_edge(6, 8, 6)
graph.add_edge(7, 8, 7)
graph.show_graph()
graph.dijkstra(0)
graph.show_path(0, 4)
# OUTPUT
# 0 -> 1(4) -> 7(8)
# 1 -> 0(4) -> 2(8) -> 7(11)
# 7 -> 0(8) -> 1(11) -> 6(1) -> 8(7)
# 2 -> 1(8) -> 3(7) -> 8(2) -> 5(4)
# 3 -> 2(7) -> 4(9) -> 5(14)
# 8 -> 2(2) -> 6(6) -> 7(7)
# 5 -> 2(4) -> 3(14) -> 4(10) -> 6(2)
# 4 -> 3(9) -> 5(10)
# 6 -> 5(2) -> 7(1) -> 8(6)
# Distance from node: 0
# Node 0 has distance: 0
# Node 1 has distance: 4
# Node 2 has distance: 12
# Node 3 has distance: 19
# Node 4 has distance: 21
# Node 5 has distance: 11
# Node 6 has distance: 9
# Node 7 has distance: 8
# Node 8 has distance: 14
# ----Path to reach 4 from 0----
# 0 -> 7 -> 6 -> 5 -> 4
# Total cost of path: 21
|
py | 7dfb60a32fc945aaa9343b54c05d19ea23be2224 | import json
import urllib.request
import re
import os
from collections import Counter
# *** CHANGE THIS as appropriate ***:
# Base path to save all data to
my_base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data/open.canada.ca")
# List of data formats that we don't want to download
skip_download = []
# skip_download = ['HTML','CSV','PDF','XLS','ZIP','XML','TXT','GML']
# INPUT: URL to json file
# OUTPUT: json data
def get_jsonparsed_data(json_url):
response = urllib.request.urlopen(json_url)
str_response = response.read().decode('utf-8')
return json.loads(str_response)
# INPUT: Name of file with URLs. Finds URLs that point to open.canada.
# OUTPUT: URLs to json files, the corresponding open.canada web pages, and open.canada IDs.
def get_json_urls(hyperlinks_text_file_name):
f = open(hyperlinks_text_file_name, 'r')
json_urls = []; # list of URLs to json metadata files of open.canada data
open_canada_urls = []; # corresponding list of URLs to open.canada data
open_canada_IDs = []; # corresponding IDs
for line in f:
# print(line,end='')
match = re.search('open.canada.ca', line) # match = re.search(pat, text)
if match:
# print(line,end='')
ID = re.findall('/dataset/(.+)', line)
# print(ID)
json_urls.append("http://open.canada.ca/data/api/action/package_show?id=" + str(ID[0]))
open_canada_urls.append(line.strip('\n'))
open_canada_IDs.append(ID[0])
f.close()
return (json_urls, open_canada_urls, open_canada_IDs)
# INPUT: json description provided by open.canada (and a URL to open.canada web-page)
# OUTPUT: metadata in our format
def parse_orig_json(json_data, open_canada_url):
my_metadata = {} # create empty dict to be filled with metadata
my_metadata['title'] = json_data['result']['title']
my_metadata['source_page'] = open_canada_url
# fields below still need to be filled with actual values
my_metadata['source_files'] = [] # [d.get('url') for d in json_data['result']['resources'] if d.get('url')] # ['http://url_to_source_file_1','http://url_to_source_file_2']
my_metadata['Category'] = 'Open Data'
my_metadata['data_last_modified'] = json_data['result']['revision_timestamp']
my_metadata['data_schema'] = {}
my_metadata['description'] = json_data['result']['notes']
my_metadata['license'] = json_data['result']['license_url']
my_metadata['tags'] = []
my_metadata['update_frequency'] = 'Other'
return my_metadata
# Saves file from URL to folder_name, using specified file_name or automatically assigned one.
# INPUT: URL; folder_name where file will be saved; file_name = 0 for automatic assignment.
def download_file(URL, folder_name, file_name=0):
if file_name == 0: # if file name is not specified
file_name = os.path.basename(URL) # get file name
full_path_to_save = os.path.join(folder_name, file_name)
try:
urllib.request.urlretrieve(URL, full_path_to_save)
except urllib.request.HTTPError: # If unable to download, save failed URL to download_errors.txt
print('There was an error with the request')
f = open(os.path.join(folder_name, 'download_errors.txt'), 'a')
f.write(URL + '\n')
f.close()
def get_all_data_types(open_canada_IDs, json_urls):
# Find all types of data resouces, count number of files of each type and get the following result:
# {'CSV': 466,
# 'HTML': 211,
# 'JSON': 3,
# 'PDF': 27,
# 'SHAPE': 3,
# 'TXT': 18,
# 'XLS': 111,
# 'XML': 92,
# 'ZIP': 38,
# 'doc': 3,
# 'fgdb / gdb': 1,
# 'gml': 3,
# 'jpeg 2000': 19,
# 'kml / kmz': 1,
# 'other': 54,
# 'rtf': 2,
# 'wfs': 1,
# 'wms': 1})
# Can list these types in skip_download = [] to skip downloading certain types.
res_type = [];
for idx in range(0, len(open_canada_IDs)):
print("Processing data source " + str(idx) + ", ID: " + str(open_canada_IDs[idx]))
json_data = get_jsonparsed_data(json_urls[idx])
for res in json_data['result']['resources']:
res_type.append(res['format'])
set(res_type)
res_type.sort()
return Counter(res_type)
# Get json_urls, open_canada_urls and open_canada_IDs from the text file containing hyperlinks.
(json_urls, open_canada_urls, open_canada_IDs) = get_json_urls("sources/open.canada.ca.txt")
# Main loop for downloading data from open.data
# for idx in range(0,1):
for idx in range(0, len(open_canada_IDs)):
print("\nProcessing data source " + str(idx) + ", ID: " + str(open_canada_IDs[idx]))
folder_path = os.path.join(my_base_path, open_canada_IDs[idx])
print(folder_path)
# create folder to download files to
if not os.path.exists(folder_path): os.makedirs(folder_path)
# download original json
orig_json_filename = open_canada_IDs[idx] + '.json'
download_file(json_urls[idx], folder_path, orig_json_filename)
# get data from original json
json_data = get_jsonparsed_data(json_urls[idx])
# create metadata from original json
metadata = parse_orig_json(json_data, open_canada_urls[idx])
# download all data resources
for res in json_data['result']['resources']:
if res['format'] in skip_download:
print(" Skipping: " + res['url'])
else:
print(" Downloading: " + res['url'])
download_file(res['url'], folder_path)
metadata['source_files'].append(res['url'])
# save metadata
fp = open(os.path.join(folder_path, 'metadata.json'), 'w')
json.dump(metadata, fp)
fp.close() |
py | 7dfb6233aacb992b02c259d4562d413a091be1b9 | from cltk_capitains_corpora_converter import run
def update():
run(
"cloned",
output="json",
repository="https://github.com/OpenGreekAndLatin/csel-dev.git",
nodes=["tei:note", "tei:orig"],
credit="Open Philology, Humboldt Chair of Digital Humanities ( https://github.com/OpenGreekAndLatin/csel-dev )",
silent=False
)
if __name__ == '__main__':
update() |
py | 7dfb62dbb2a161b1d33778f92122175938d9ce29 | import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions import Normal, OneHotCategorical
import logging
logger = logging.getLogger()
class MixtureDensityNet(nn.Module):
def __init__(self, n_input: int, n_output: int, n_component: int):
"""
Parameters
----------
n_input : int
the dimension of input feature
n_output :
the dimension of output space
n_component :
the number of component of Gauss distribution
"""
super(MixtureDensityNet, self).__init__()
self.n_input = n_input
self.n_output = n_output
self.n_component = n_component
self.mu_linear = nn.Linear(n_input, n_output * n_component)
self.logsigma_linear = nn.Linear(n_input, n_output * n_component)
self.logpi_linear = nn.Linear(n_input, n_component)
def forward(self, feature):
"""
Parameters
----------
feature : torch.Tensor[N, n_input]
input feature
Returns
-------
norm: Normal
cat: OneHotCategorical
"""
n_data = feature.size()[0]
mu = self.mu_linear(feature).reshape((n_data, self.n_output, self.n_component))
logsigma = self.logsigma_linear(feature).reshape((n_data, self.n_output, self.n_component))
norm = Normal(loc=mu, scale=torch.exp(logsigma))
logpi = self.logpi_linear(feature)
logpi = logpi - torch.min(logpi)
cat = OneHotCategorical(logits=logpi)
return norm, cat
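

if __name__ == "__main__":
    # Minimal sketch (hypothetical dimensions): turning the (norm, cat) pair
    # returned by forward() into a mixture-density negative log-likelihood.
    net = MixtureDensityNet(n_input=16, n_output=2, n_component=5)
    feature = torch.randn(32, 16)
    target = torch.randn(32, 2)
    norm, cat = net(feature)
    # log N_k(target) per component: broadcast target over the component axis,
    # then sum the per-dimension log-densities of each Gaussian component
    log_prob = norm.log_prob(target.unsqueeze(-1)).sum(dim=1)          # [32, 5]
    # log sum_k pi_k N_k(target); cat.logits are normalized log-probabilities
    nll = -torch.logsumexp(cat.logits + log_prob, dim=-1).mean()
    print(f"mixture NLL: {nll.item():.4f}")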
|
py | 7dfb62ff0eaeee5117fa534d31549859b4ec5be2 | """packer_builder/specs/builders/__init__.py"""
|
py | 7dfb63c23cdd6051009742d6b84db928b43c8fb2 | def sample_func(a, b, c):
return 'Hello'
class SampleClass:
def __init__(self, a):
self.a = a
def get_a(self):
return self.a
def set_a(self, x):
        self.a = x
|
py | 7dfb653d066194c8218bf3cb994633df9a208dc3 | #!flask/bin/python
from flask import Flask, jsonify, make_response, abort, request
from wuxingData import wuxingDic
import main
import characters
import metaphysic
import readDic
app = Flask(__name__)
tasks = [
{
'id': 1,
'title': u'Buy groceries',
'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
'done': False
},
{
'id': 2,
'title': u'Learn Python',
'description': u'Need to find a good Python tutorial on the web',
'done': False
}
]
@app.route('/api/tasks', methods=['GET'])
def get_tasks():
return jsonify({'tasks': tasks})
@app.route('/api/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
task = filter(lambda t: t['id'] == task_id, tasks)
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
@app.route('/api/tasks', methods=['POST'])
def create_task():
if not request.json or not 'title' in request.json:
abort(400)
task = {
'id': tasks[-1]['id'] + 1,
'title': request.json['title'],
'description': request.json.get('description', ""),
'done': False
}
tasks.append(task)
return jsonify({'task': task}), 201
@app.route('/api/tasks/<int:task_id>', methods=['PUT'])
def update_task(task_id):
task = filter(lambda t: t['id'] == task_id, tasks)
if len(task) == 0:
abort(404)
if not request.json:
abort(400)
if 'title' in request.json and type(request.json['title']) != unicode:
abort(400)
if 'description' in request.json and type(request.json['description']) is not unicode:
abort(400)
if 'done' in request.json and type(request.json['done']) is not bool:
abort(400)
task[0]['title'] = request.json.get('title', task[0]['title'])
task[0]['description'] = request.json.get('description', task[0]['description'])
task[0]['done'] = request.json.get('done', task[0]['done'])
return jsonify({'task': task[0]})
@app.route('/api/tasks/<int:task_id>', methods=['DELETE'])
def delete_task(task_id):
task = filter(lambda t: t['id'] == task_id, tasks)
if len(task) == 0:
abort(404)
tasks.remove(task[0])
return jsonify({'result': True})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.debug = True
app.run() |
py | 7dfb6558746488ff82e5e94132a99d7e88502ad5 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestLengthsTileOp(hu.HypothesisTestCase):
@given(
inputs=st.integers(min_value=1, max_value=20).flatmap(
lambda size: st.tuples(
hu.arrays([size]),
hu.arrays([size], dtype=np.int32,
elements=st.integers(min_value=0, max_value=20)),
)
),
**hu.gcs_cpu_only)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
])]
op = core.CreateOperator(
"LengthsTile",
["data", "lengths"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=lengths_tile_op,
)
|
py | 7dfb66a87cde16cf4fe4ec13fe68e8ace0320859 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/armor/shared_mass_reduction_kit_mk5.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","mass_reduction_kit_mk5")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 7dfb66f2f931776459c34e7733dcb05176a69013 | import numpy.random as rd
import torch
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import QNet, QNetDuel
class AgentDQN(AgentBase): # [ElegantRL.2021.12.12]
"""
Bases: ``AgentBase``
Deep Q-Network algorithm. “Human-Level Control Through Deep Reinforcement Learning”. Mnih V. et al.. 2015.
:param net_dim[int]: the dimension of networks (the width of neural networks)
:param state_dim[int]: the dimension of state (the number of state vector)
:param action_dim[int]: the dimension of action (the number of discrete action)
:param learning_rate[float]: learning rate of optimizer
:param if_per_or_gae[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
:param env_num[int]: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
:param agent_id[int]: if the visible_gpu is '1,9,3,4', agent_id=1 means (1,9,4,3)[agent_id] == 9
"""
def __init__(self):
AgentBase.__init__(self)
self.ClassCri = QNet
self.explore_rate = (
0.25 # the probability of choosing action randomly in epsilon-greedy
)
def init(
self,
net_dim=256,
state_dim=8,
action_dim=2,
reward_scale=1.0,
gamma=0.99,
learning_rate=1e-4,
if_per_or_gae=False,
env_num=1,
gpu_id=0,
):
"""
Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
"""
AgentBase.init(
self,
net_dim=net_dim,
state_dim=state_dim,
action_dim=action_dim,
reward_scale=reward_scale,
gamma=gamma,
learning_rate=learning_rate,
if_per_or_gae=if_per_or_gae,
env_num=env_num,
gpu_id=gpu_id,
)
if if_per_or_gae: # if_use_per
self.criterion = torch.nn.SmoothL1Loss(reduction="none")
self.get_obj_critic = self.get_obj_critic_per
else:
self.criterion = torch.nn.SmoothL1Loss(reduction="mean")
self.get_obj_critic = self.get_obj_critic_raw
def select_actions(
self, states: torch.Tensor
) -> torch.Tensor: # for discrete action space
"""
Select discrete actions given an array of states.
.. note::
Using ϵ-greedy to select uniformly random actions for exploration.
:param states: an array of states in a shape (batch_size, state_dim, ).
:return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).
"""
if rd.rand() < self.explore_rate: # epsilon-greedy
a_ints = torch.randint(
                self.action_dim, size=(states.shape[0],)
) # choosing action randomly
else:
actions = self.act(states.to(self.device))
a_ints = actions.argmax(dim=1)
return a_ints.detach().cpu()
def explore_one_env(self, env, target_step) -> list:
"""
Collect trajectories through the actor-environment interaction for a **single** environment instance.
:param env: the DRL environment instance.
:param target_step: the total step for the interaction.
:param reward_scale: a reward scalar to clip the reward.
:param gamma: the discount factor.
:return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
"""
traj = []
state = self.states[0]
for _ in range(target_step):
ten_state = torch.as_tensor(state, dtype=torch.float32)
ten_action = self.select_actions(ten_state.unsqueeze(0))[0]
action = ten_action.numpy() # isinstance(action, int)
next_s, reward, done, _ = env.step(action)
ten_other = torch.empty(2 + 1)
ten_other[0] = reward
ten_other[1] = done
ten_other[2] = ten_action
traj.append((ten_state, ten_other))
state = env.reset() if done else next_s
self.states[0] = state
traj_state = torch.stack([item[0] for item in traj])
traj_other = torch.stack([item[1] for item in traj])
traj_list = [
(traj_state, traj_other),
]
return self.convert_trajectory(traj_list) # [traj_env_0, ...]
def explore_vec_env(self, env, target_step) -> list:
"""
Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.
:param env: the DRL environment instance.
:param target_step: the total step for the interaction.
:param reward_scale: a reward scalar to clip the reward.
:param gamma: the discount factor.
:return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
"""
ten_states = self.states
traj = []
for _ in range(target_step):
ten_actions = self.select_actions(ten_states)
ten_next_states, ten_rewards, ten_dones = env.step(ten_actions)
ten_others = torch.cat(
(
ten_rewards.unsqueeze(0),
ten_dones.unsqueeze(0),
ten_actions.unsqueeze(0),
)
)
traj.append((ten_states, ten_others))
ten_states = ten_next_states
self.states = ten_states
traj_state = torch.stack([item[0] for item in traj])
traj_other = torch.stack([item[1] for item in traj])
traj_list = [
(traj_state[:, env_i, :], traj_other[:, env_i, :])
for env_i in range(len(self.states))
]
return self.convert_trajectory(traj_list) # [traj_env_0, ...]
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple:
"""
Update the neural networks by sampling batch data from ``ReplayBuffer``.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:param repeat_times: the re-using times of each trajectory.
:param soft_update_tau: the soft update parameter.
:return: a tuple of the log information.
"""
buffer.update_now_len()
obj_critic = q_value = None
for _ in range(int(buffer.now_len / batch_size * repeat_times)):
obj_critic, q_value = self.get_obj_critic(buffer, batch_size)
self.optim_update(self.cri_optim, obj_critic)
if self.if_use_cri_target:
self.soft_update(self.cri_target, self.cri, soft_update_tau)
return obj_critic.item(), q_value.mean().item()
def get_obj_critic_raw(self, buffer, batch_size):
"""
Calculate the loss of the network and predict Q values with **uniform sampling**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:return: the loss of the network and Q values.
"""
with torch.no_grad():
reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0]
q_label = reward + mask * next_q
q_value = self.cri(state).gather(1, action.long())
obj_critic = self.criterion(q_value, q_label)
return obj_critic, q_value
def get_obj_critic_per(self, buffer, batch_size):
"""
Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:return: the loss of the network and Q values.
"""
with torch.no_grad():
reward, mask, action, state, next_s, is_weights = buffer.sample_batch(
batch_size
)
next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0]
q_label = reward + mask * next_q
q_value = self.cri(state).gather(1, action.long())
td_error = self.criterion(
q_value, q_label
) # or td_error = (q_value - q_label).abs()
obj_critic = (td_error * is_weights).mean()
buffer.td_error_update(td_error.detach())
return obj_critic, q_value
class AgentDuelingDQN(AgentDQN): # [ElegantRL.2021.12.12]
"""
Bases: ``AgentDQN``
Dueling network.
"""
def __init__(self):
AgentDQN.__init__(self)
self.ClassCri = QNetDuel
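

# Usage sketch: the agents above are normally driven by ElegantRL's training
# loop; a minimal manual setup, using only the ``init`` signature shown above,
# would look roughly like this (the env and buffer objects are assumed to exist):
#
#     agent = AgentDQN()
#     agent.init(net_dim=256, state_dim=8, action_dim=2, gpu_id=0)
#     traj_list = agent.explore_one_env(env, target_step=1024)
#     obj_critic, q_value = agent.update_net(buffer, batch_size=256,
#                                            repeat_times=1, soft_update_tau=2 ** -8)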
|
py | 7dfb66fc5225de3e0473132c64277701c522edb5 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import BGLTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(BGLTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrase was called", self.nodes[0].walletpassphrase, 'ff', 1)
assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrasechange was called.", self.nodes[0].walletpassphrasechange, 'ff', 'ff')
# Encrypt the wallet
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].encryptwallet, '')
self.nodes[0].encryptwallet(passphrase)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
assert_raises_rpc_error(-15, "Error: running with an encrypted wallet, but encryptwallet was called.", self.nodes[0].encryptwallet, 'ff')
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrase, '', 1)
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrasechange, '', 'ff')
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(3)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
if __name__ == '__main__':
WalletEncryptionTest().main()
|
py | 7dfb6809b6bee85f929b6a9558b0ccd617fe7a31 | # coding:utf-8
from logging import getLogger
from peewee import Proxy
from playhouse.pool import PooledMySQLDatabase
from service import Service
__all__ = [ "DataBaseService" ]
DB_CONNECTION_MAX_NUM = 4
class PooledMySQLDatabaseWithReconnection(PooledMySQLDatabase):
"""Mysql connection pool support reconnection"""
def execute_sql(self, sql, params=None, require_commit=True):
"""override and support reconnect"""
log = getLogger('peewee.pool')
try:
return super(PooledMySQLDatabaseWithReconnection, self) \
.execute_sql(sql, params, require_commit)
except Exception as exe:
typeName = type( exe ).__name__
if typeName in ('OperationalError', ):
try:
log.info("try to close current connection")
if(not self.is_closed()):
log.info("try to close connection")
self.close()
log.info("try to re-execute current sql")
cursor = self.get_cursor()
cursor.execute(sql, params or ())
if require_commit and self.get_autocommit():
self.commit()
return cursor
except Exception as exc:
raise RuntimeError('reconnection failed: %s' \
% unicode( exc ))
raise
class DataBaseService(Service):
"""Manage all services"""
def __init__(self, env, host, port, user, passwd, db):
super(DataBaseService, self).__init__(env)
self._db_proxy = Proxy()
self._conn_info = dict(host=host, port=port, \
user=user, passwd=passwd, \
db=db)
def on_active(self):
super(DataBaseService, self).on_active()
conn_info = self._conn_info.copy()
db_name = conn_info.pop('db')
database = PooledMySQLDatabaseWithReconnection(
db_name,
max_connections=DB_CONNECTION_MAX_NUM,
stale_timeout=300,
threadlocals=True,
**conn_info
)
self._db_proxy.initialize( database )
self._db_proxy.connect()
def get_db(self):
return self._db_proxy |
py | 7dfb6888a52c4c7f82248f2d539271e9cc38b3ff | ################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Unit test Library for the objects in decay module."""
from __future__ import division
import copy
import os
import sys
import time
import tests.unit_tests as unittest
import madgraph.core.base_objects as base_objects
import models.import_ufo as import_ufo
import models.model_reader as model_reader
_file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
#===============================================================================
# TestModelReader
#===============================================================================
class TestModelReader(unittest.TestCase):
"""Test class for the ModelReader object"""
def setUp(self):
"""Set up decay model"""
#Read the full SM
sm_path = import_ufo.find_ufo_path('sm')
self.base_model = import_ufo.import_model(sm_path)
self.model_reader = model_reader.ModelReader(self.base_model)
def test_set_parameters_and_couplings(self):
"""Test reading a param card"""
param_path = os.path.join(_file_path, '../input_files/param_card_sm.dat')
self.model_reader.set_parameters_and_couplings(os.path.join(param_path))
for param in sum([self.base_model.get('parameters')[key] for key \
in self.base_model.get('parameters')], []):
value = param.value
self.assertTrue(isinstance(value, (complex, float, int)))
self.assertTrue(isinstance(self.model_reader.get('parameter_dict')[\
param.name], (complex, float, int)))
for coupl in sum([self.base_model.get('couplings')[key] for key \
in self.base_model.get('couplings')], []):
value = coupl.value
self.assertTrue(isinstance(value, complex))
self.assertTrue(isinstance(self.model_reader.get('coupling_dict')[\
coupl.name], complex))
if __name__ == '__main__':
unittest.unittest.main()
|
py | 7dfb68d53a42c2ffe48a419952c6efc736ff586d | from peewee import *
from barcounter import confutils as conf
from . import db
DRINK_NAME_LENGTH = conf.limitation("drink_name_length")
class AbstractModel(Model):
class Meta:
database = db
class Person(AbstractModel):
uid = IntegerField()
server = IntegerField()
intoxication = IntegerField()
class Drink(AbstractModel):
server = IntegerField()
name = CharField(max_length=DRINK_NAME_LENGTH)
intoxication = IntegerField()
portion_size = IntegerField()
portions_per_day = IntegerField()
portions_left = IntegerField()
|
py | 7dfb6a87c3c972f54da48b129ad49452a66cef80 | """
looks for parameter values that are reflected in the response.
Author: maradrianbelen.com
The scan function will be called for every request/response pair made via ZAP, excluding some of the automated tools
Passive scan rules should not make any requests
Note that new passive scripts will initially be disabled
Right click the script in the Scripts tree and select "enable"
Refactored & Improved by nil0x42
"""
# Set to True if you want to see results on a per param basis
# (i.e.: A single URL may be listed more than once)
RESULT_PER_FINDING = False
# Ignore parameters whose length is too short
MIN_PARAM_VALUE_LENGTH = 8
def scan(ps, msg, src):
# Docs on alert raising function:
# raiseAlert(int risk, int confidence, str name, str description, str uri,
# str param, str attack, str otherInfo, str solution,
# str evidence, int cweId, int wascId, HttpMessage msg)
# risk: 0: info, 1: low, 2: medium, 3: high
# confidence: 0: falsePositive, 1: low, 2: medium, 3: high, 4: confirmed
alert_title = "Reflected HTTP GET parameter(s) (script)"
alert_desc = ("Reflected parameter value has been found. "
"A reflected parameter values may introduce XSS "
"vulnerability or HTTP header injection.")
uri = header = body = None
reflected_params = []
for param in msg.getUrlParams():
value = param.getValue()
if len(value) < MIN_PARAM_VALUE_LENGTH:
continue
if not header:
uri = msg.getRequestHeader().getURI().toString()
header = msg.getResponseHeader().toString()
body = msg.getResponseBody().toString()
if value in header or value in body:
if RESULT_PER_FINDING:
param_name = param.getName()
ps.raiseAlert(0, 2, alert_title, alert_desc, uri, param_name,
None, None, None, value, 0, 0, msg)
else:
reflected_params.append(param.getName())
if reflected_params and not RESULT_PER_FINDING:
reflected_params = u",".join(reflected_params)
ps.raiseAlert(0, 2, alert_title, alert_desc, uri, reflected_params,
None, None, None, None, 0, 0, msg)
|
py | 7dfb6c861cd96bb4310cdb390c6bad5e18c5e7ea | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest, parameterized
import jax
from jax.config import config
import jax.dlpack
import jax.numpy as jnp
from jax import test_util as jtu
import numpy as np
config.parse_flags_with_absl()
try:
import torch
import torch.utils.dlpack
except ImportError:
torch = None
try:
import cupy
except ImportError:
cupy = None
try:
import tensorflow as tf
tf_version = tuple(
int(x) for x in tf.version.VERSION.split("-")[0].split("."))
except:
tf = None
dlpack_dtypes = sorted(list(jax.dlpack.SUPPORTED_DTYPES),
key=lambda x: x.__name__)
torch_dtypes = [jnp.int8, jnp.int16, jnp.int32, jnp.int64,
jnp.uint8, jnp.float16, jnp.float32, jnp.float64]
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (2, 3, 4)]
empty_array_shapes = []
empty_array_shapes += [(0,), (0, 4), (3, 0),]
nonempty_nonscalar_array_shapes += [(3, 1), (1, 4), (2, 1, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
all_shapes = nonempty_array_shapes + empty_array_shapes
class DLPackTest(jtu.JaxTestCase):
def setUp(self):
super(DLPackTest, self).setUp()
if jtu.device_under_test() == "tpu":
self.skipTest("DLPack not supported on TPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_take_ownership={}".format(
jtu.format_shape_dtype_string(shape, dtype),
take_ownership),
"shape": shape, "dtype": dtype, "take_ownership": take_ownership}
for shape in all_shapes
for dtype in dlpack_dtypes
for take_ownership in [False, True]))
def testJaxRoundTrip(self, shape, dtype, take_ownership):
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
dlpack = jax.dlpack.to_dlpack(x, take_ownership=take_ownership)
self.assertEqual(take_ownership, x.device_buffer.is_deleted())
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np.astype(x.dtype), y)
self.assertRaisesRegex(RuntimeError,
"DLPack tensor may be consumed at most once",
lambda: jax.dlpack.from_dlpack(dlpack))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testTensorFlowToJax(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.uint64,
jnp.float64]:
raise self.skipTest("x64 types are disabled by jax_enable_x64")
if (jtu.device_under_test() == "gpu" and
not tf.config.list_physical_devices("GPU")):
raise self.skipTest("TensorFlow not configured with GPU support")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
with tf.device("/GPU:0" if jtu.device_under_test() == "gpu" else "/CPU:0"):
x = tf.constant(np)
dlpack = tf.experimental.dlpack.to_dlpack(x)
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testJaxToTensorFlow(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.uint64,
jnp.float64]:
self.skipTest("x64 types are disabled by jax_enable_x64")
if (jtu.device_under_test() == "gpu" and
not tf.config.list_physical_devices("GPU")):
raise self.skipTest("TensorFlow not configured with GPU support")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
# TODO(b/171320191): this line works around a missing context initialization
# bug in TensorFlow.
_ = tf.add(1, 1)
dlpack = jax.dlpack.to_dlpack(x)
y = tf.experimental.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y.numpy())
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in torch_dtypes))
@unittest.skipIf(not torch, "Test requires PyTorch")
def testTorchToJax(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.float64]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = torch.from_numpy(np)
x = x.cuda() if jtu.device_under_test() == "gpu" else x
dlpack = torch.utils.dlpack.to_dlpack(x)
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in torch_dtypes))
@unittest.skipIf(not torch, "Test requires PyTorch")
def testJaxToTorch(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.float64]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
dlpack = jax.dlpack.to_dlpack(x)
y = torch.utils.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y.cpu().numpy())
class CudaArrayInterfaceTest(jtu.JaxTestCase):
def setUp(self):
super(CudaArrayInterfaceTest, self).setUp()
if jtu.device_under_test() != "gpu":
self.skipTest("__cuda_array_interface__ is only supported on GPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not cupy, "Test requires CuPy")
def testJaxToCuPy(self, shape, dtype):
rng = jtu.rand_default(self.rng())
x = rng(shape, dtype)
y = jnp.array(x)
z = cupy.asarray(y)
self.assertEqual(y.__cuda_array_interface__["data"][0],
z.__cuda_array_interface__["data"][0])
self.assertAllClose(x, cupy.asnumpy(z))
class Bfloat16Test(jtu.JaxTestCase):
@unittest.skipIf((not tf or tf_version < (2, 5, 0)),
"Test requires TensorFlow 2.5.0 or newer")
def testJaxAndTfHaveTheSameBfloat16Type(self):
self.assertEqual(np.dtype(jnp.bfloat16).num,
np.dtype(tf.dtypes.bfloat16.as_numpy_dtype).num)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
py | 7dfb6d7df425b97a27596fe2daf8ce3ba7674823 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'input_photos_ui.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import ui.rsc_rc
class Ui_insert(object):
def setupUi(self, insert):
if not insert.objectName():
insert.setObjectName(u"insert")
insert.resize(956, 671)
insert.setStyleSheet(u"QWidget#centralwidget\n"
"{\n"
"background-color: rgb(255, 255, 255);\n"
"}")
self.centralwidget = QWidget(insert)
self.centralwidget.setObjectName(u"centralwidget")
self.chazhao = QToolButton(self.centralwidget)
self.chazhao.setObjectName(u"chazhao")
self.chazhao.setGeometry(QRect(580, 150, 191, 121))
icon = QIcon()
icon.addFile(u":img/chazhao.png", QSize(), QIcon.Normal, QIcon.Off)
self.chazhao.setIcon(icon)
self.chazhao.setIconSize(QSize(240, 180))
self.chazhao.setAutoRaise(True)
self.out = QToolButton(self.centralwidget)
self.out.setObjectName(u"out")
self.out.setGeometry(QRect(750, 510, 201, 101))
icon1 = QIcon()
icon1.addFile(u":/img/fanhui.png", QSize(), QIcon.Normal, QIcon.Off)
self.out.setIcon(icon1)
self.out.setIconSize(QSize(147, 100))
self.out.setAutoRaise(True)
self.t1 = QLabel(self.centralwidget)
self.t1.setObjectName(u"t1")
self.t1.setGeometry(QRect(590, 50, 345, 75))
self.t1.setPixmap(QPixmap(u":/img/xuehao.png"))
self.stu_id = QLineEdit(self.centralwidget)
self.stu_id.setObjectName(u"stu_id")
self.stu_id.setGeometry(QRect(700, 70, 180, 33))
self.stu_id.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.stu_id.setFrame(False)
self.camera = QLabel(self.centralwidget)
self.camera.setObjectName(u"camera")
self.camera.setGeometry(QRect(20, 20, 551, 411))
self.camera.setFrameShape(QFrame.Box)
self.camera.setLineWidth(3)
self.camera.setPixmap(QPixmap(u":img/backgrand.png"))
self.camera.setAlignment(Qt.AlignCenter)
self.t3_2 = QLabel(self.centralwidget)
self.t3_2.setObjectName(u"t3_2")
self.t3_2.setGeometry(QRect(60, 460, 121, 35))
font = QFont()
font.setFamily(u"\u5fae\u8f6f\u96c5\u9ed1")
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.t3_2.setFont(font)
self.t3_2.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.chaxunjieguo = QLabel(self.centralwidget)
self.chaxunjieguo.setObjectName(u"chaxunjieguo")
self.chaxunjieguo.setGeometry(QRect(190, 460, 471, 35))
self.chaxunjieguo.setFont(font)
self.chaxunjieguo.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.stu_id_2 = QLabel(self.centralwidget)
self.stu_id_2.setObjectName(u"stu_id_2")
self.stu_id_2.setGeometry(QRect(130, 570, 221, 35))
self.stu_id_2.setFont(font)
self.stu_id_2.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.shibiejieguo = QLabel(self.centralwidget)
self.shibiejieguo.setObjectName(u"shibiejieguo")
self.shibiejieguo.setGeometry(QRect(20, 440, 721, 201))
self.shibiejieguo.setFrameShape(QFrame.Box)
self.shibiejieguo.setFrameShadow(QFrame.Raised)
self.shibiejieguo.setLineWidth(3)
self.t1_2 = QLabel(self.centralwidget)
self.t1_2.setObjectName(u"t1_2")
self.t1_2.setGeometry(QRect(370, 510, 75, 33))
self.t1_2.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.t2_2 = QLabel(self.centralwidget)
self.t2_2.setObjectName(u"t2_2")
self.t2_2.setGeometry(QRect(370, 570, 75, 33))
self.t2_2.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.t4 = QLabel(self.centralwidget)
self.t4.setObjectName(u"t4")
self.t4.setGeometry(QRect(60, 570, 65, 35))
self.t4.setFont(font)
self.t4.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.stu_age = QLabel(self.centralwidget)
self.stu_age.setObjectName(u"stu_age")
self.stu_age.setGeometry(QRect(440, 570, 221, 35))
self.stu_age.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.stu_major = QLabel(self.centralwidget)
self.stu_major.setObjectName(u"stu_major")
self.stu_major.setGeometry(QRect(440, 510, 221, 35))
self.stu_major.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.stu_name = QLabel(self.centralwidget)
self.stu_name.setObjectName(u"stu_name")
self.stu_name.setGeometry(QRect(130, 510, 221, 35))
self.stu_name.setFont(font)
self.stu_name.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.t3 = QLabel(self.centralwidget)
self.t3.setObjectName(u"t3")
self.t3.setGeometry(QRect(60, 510, 65, 35))
self.t3.setFont(font)
self.t3.setStyleSheet(u"font: 25px \"\u5fae\u8f6f\u96c5\u9ed1\";\n"
"color: rgb(112, 112, 112);")
self.lururenlian = QToolButton(self.centralwidget)
self.lururenlian.setObjectName(u"lururenlian")
self.lururenlian.setGeometry(QRect(770, 150, 181, 121))
icon2 = QIcon()
icon2.addFile(u":/img/lururenlian.png", QSize(), QIcon.Normal, QIcon.Off)
self.lururenlian.setIcon(icon2)
self.lururenlian.setIconSize(QSize(240, 180))
self.lururenlian.setAutoRaise(True)
self.paizhao = QToolButton(self.centralwidget)
self.paizhao.setObjectName(u"paizhao")
self.paizhao.setGeometry(QRect(590, 270, 181, 121))
icon3 = QIcon()
icon3.addFile(u":/img/paizhao.png", QSize(), QIcon.Normal, QIcon.Off)
self.paizhao.setIcon(icon3)
self.paizhao.setIconSize(QSize(240, 180))
self.paizhao.setAutoRaise(True)
self.luru = QToolButton(self.centralwidget)
self.luru.setObjectName(u"luru")
self.luru.setGeometry(QRect(770, 270, 181, 121))
icon4 = QIcon()
icon4.addFile(u":/img/luru.png", QSize(), QIcon.Normal, QIcon.Off)
self.luru.setIcon(icon4)
self.luru.setIconSize(QSize(240, 180))
self.luru.setAutoRaise(True)
insert.setCentralWidget(self.centralwidget)
self.shibiejieguo.raise_()
self.chazhao.raise_()
self.out.raise_()
self.t1.raise_()
self.stu_id.raise_()
self.camera.raise_()
self.t3_2.raise_()
self.chaxunjieguo.raise_()
self.stu_id_2.raise_()
self.t1_2.raise_()
self.t2_2.raise_()
self.t4.raise_()
self.stu_age.raise_()
self.stu_major.raise_()
self.stu_name.raise_()
self.t3.raise_()
self.lururenlian.raise_()
self.paizhao.raise_()
self.luru.raise_()
self.retranslateUi(insert)
QMetaObject.connectSlotsByName(insert)
# setupUi
def retranslateUi(self, insert):
insert.setWindowTitle(QCoreApplication.translate("insert", u"MainWindow", None))
self.chazhao.setText("")
self.out.setText("")
self.t1.setText("")
self.stu_id.setText("")
self.stu_id.setPlaceholderText(QCoreApplication.translate("insert", u"\u8bf7\u8f93\u5165\u5b66\u53f7", None))
self.camera.setText("")
self.t3_2.setText(QCoreApplication.translate("insert", u"\u67e5\u8be2\u7ed3\u679c\uff1a", None))
self.chaxunjieguo.setText("")
self.stu_id_2.setText("")
self.shibiejieguo.setText("")
self.t1_2.setText(QCoreApplication.translate("insert", u"\u4e13\u4e1a\uff1a", None))
self.t2_2.setText(QCoreApplication.translate("insert", u"\u5e74\u9f84\uff1a", None))
self.t4.setText(QCoreApplication.translate("insert", u"\u5b66\u53f7\uff1a", None))
self.stu_age.setText("")
self.stu_major.setText("")
self.stu_name.setText("")
self.t3.setText(QCoreApplication.translate("insert", u"\u59d3\u540d\uff1a", None))
self.lururenlian.setText("")
self.paizhao.setText("")
self.luru.setText("")
# retranslateUi
|
py | 7dfb6e5da83c0b979eeabba63ff64ff936dacac0 | import numpy as np
from copy import copy
from si.data.dataset import Dataset
class StandardScaler:
"""
    Standardize features by removing the mean and scaling to unit variance.
The standard score of an instance is calculated by:
z = (x - u) / s
where u is the mean of the training data and s is the standard deviation.
Standardizing data is often necessary before training many machine
learning models to avoid problems like exploding/vanishing gradients and
feature dominance.
Attributes
----------
_mean : numpy array of shape (n_features, )
The mean of each feature in the training set.
_var : numpy array of shape (n_features, )
The variance of each feature in the training set.
"""
def __init__(self):
pass
def fit(self, dataset):
"""
Calculate and store the mean and variance of each feature in the
training set.
Parameters
----------
dataset : A Dataset object to be standardized
"""
self._mean = np.mean(dataset.X, axis=0)
self._var = np.var(dataset.X, axis=0)
    def transform(self, dataset, inline=False):
        """
        Standardize data by subtracting out the mean and dividing by
        standard deviation calculated during fitting.
        Parameters
        ----------
        dataset : A Dataset object to be standardized
        Returns
        -------
        A Dataset object with standardized data.
        """
        Z = (dataset.X - self._mean) / np.sqrt(self._var)
        if inline:
            dataset.X = Z
            return dataset
        else:
            return Dataset(Z, copy(dataset.y), copy(dataset._xnames), copy(dataset._yname))
def fit_transform(self, dataset, inline=False):
"""
Calculate and store the mean and variance of each feature and
standardize the data.
Parameters
----------
dataset : A Dataset object to be standardized
Returns
-------
        A Dataset object with standardized data.
"""
self.fit(dataset)
return self.transform(dataset, inline=inline)
def inverse_transform(self, dataset, inline=False):
"""
        Transform data back into its original state by multiplying by standard
deviation and adding the mean back in.
Inverse standard scaler:
x = z * s + u
where s is the standard deviation, and u is the mean.
Parameters
----------
dataset : A standardized Dataset object
Returns
-------
Dataset object
"""
X = dataset.X * np.sqrt(self._var) + self._mean
if inline:
dataset.X = X
return dataset
else:
from ..data import Dataset
return Dataset(X,
copy(dataset.y),
copy(dataset._xnames),
copy(dataset._yname))
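# Illustrative check (added for clarity, not part of the original API): the
# standardization and its inverse round-trip with plain NumPy on a made-up
# array, mirroring z = (x - u) / s and x = z * s + u implemented above.
if __name__ == "__main__":
    X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    mean, std = np.mean(X, axis=0), np.sqrt(np.var(X, axis=0))
    Z = (X - mean) / std         # forward: standardize
    X_back = Z * std + mean      # inverse: undo standardization
    assert np.allclose(X, X_back)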
|
py | 7dfb70b5a600a3f66d1595c43e0840acf6558c59 | builtins_test_text_001 = '''
arg = 10
int = 10
'''
builtins_test_text_002 = '''
def function_one():
pass
def function_two(
arg,
):
pass
def function_three(
int,
):
pass
async def function_four():
pass
async def function_five(
arg,
):
pass
async def function_six(
int,
):
pass
def int():
pass
'''
builtins_test_text_003 = '''
for i in range(10):
pass
for int in range(10):
pass
for a, b in range(10):
pass
for a, int in range(10):
pass
for *a, b in [[1,2,3]]:
pass
for *int, b in [[1,2,3]]:
pass
'''
builtins_test_text_004 = '''
with func():
pass
with func() as var:
pass
with func() as int:
pass
with func_one() as var_one, func_two() as var_two:
pass
with func_one() as int, func_two() as var_two:
pass
with func_one() as var_one, func_two() as int:
pass
with func() as (var_one, var_two):
pass
with func() as (var_one, int):
pass
'''
builtins_test_text_005 = '''
var = [
var_one
for var_one in var_list
]
var = [
int
for int in var_list
]
var = [
(var_one, var_two)
for (var_one, var_two) in var_list
]
var = [
(var_one, int)
for (var_one, int) in var_list
]
'''
builtins_test_text_006 = '''
class ClassName:
pass
class dict:
pass
class SomeClass:
type = 'some'
def method(
self,
):
pass
def filter(
self,
):
pass
'''
|
py | 7dfb70c48cee595a7610549bdf8338e64bd31d8b | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from datetime import datetime, timedelta
from typing import List, Optional
from unittest.mock import Mock, patch
from uuid import uuid4
import pytest
from contextlib2 import contextmanager
from flask_sqlalchemy import BaseQuery
from freezegun import freeze_time
from sqlalchemy.sql import func
from superset import db
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.reports import (
ReportExecutionLog,
ReportRecipients,
ReportRecipientType,
ReportSchedule,
ReportScheduleType,
ReportScheduleValidatorType,
ReportState,
)
from superset.models.slice import Slice
from superset.reports.commands.exceptions import (
AlertQueryError,
AlertQueryInvalidTypeError,
AlertQueryMultipleColumnsError,
AlertQueryMultipleRowsError,
ReportScheduleNotFoundError,
ReportScheduleNotificationError,
ReportSchedulePreviousWorkingError,
ReportScheduleScreenshotFailedError,
ReportScheduleScreenshotTimeout,
ReportScheduleWorkingTimeoutError,
)
from superset.reports.commands.execute import AsyncExecuteReportScheduleCommand
from superset.utils.core import get_example_database
from tests.fixtures.birth_names_dashboard import load_birth_names_dashboard_with_slices
from tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices_module_scope,
)
from tests.reports.utils import insert_report_schedule
from tests.test_app import app
from tests.utils import read_fixture
pytestmark = pytest.mark.usefixtures(
"load_world_bank_dashboard_with_slices_module_scope"
)
test_id = str(uuid4())
def get_target_from_report_schedule(report_schedule: ReportSchedule) -> List[str]:
return [
json.loads(recipient.recipient_config_json)["target"]
for recipient in report_schedule.recipients
]
def get_error_logs_query(report_schedule: ReportSchedule) -> BaseQuery:
return (
db.session.query(ReportExecutionLog)
.filter(
ReportExecutionLog.report_schedule == report_schedule,
ReportExecutionLog.state == ReportState.ERROR,
)
.order_by(ReportExecutionLog.end_dttm.desc())
)
def get_notification_error_sent_count(report_schedule: ReportSchedule) -> int:
logs = get_error_logs_query(report_schedule).all()
notification_sent_logs = [
log.error_message
for log in logs
if log.error_message == "Notification sent with error"
]
return len(notification_sent_logs)
def assert_log(state: str, error_message: Optional[str] = None):
db.session.commit()
logs = db.session.query(ReportExecutionLog).all()
if state == ReportState.WORKING:
assert len(logs) == 2
assert logs[1].error_message == error_message
assert logs[1].state == state
return
# On error we send an email
if state == ReportState.ERROR:
assert len(logs) == 3
else:
assert len(logs) == 2
log_states = [log.state for log in logs]
assert ReportState.WORKING in log_states
assert state in log_states
assert error_message in [log.error_message for log in logs]
def create_report_notification(
email_target: Optional[str] = None,
slack_channel: Optional[str] = None,
chart: Optional[Slice] = None,
dashboard: Optional[Dashboard] = None,
database: Optional[Database] = None,
sql: Optional[str] = None,
report_type: Optional[str] = None,
validator_type: Optional[str] = None,
validator_config_json: Optional[str] = None,
grace_period: Optional[int] = None,
) -> ReportSchedule:
report_type = report_type or ReportScheduleType.REPORT
target = email_target or slack_channel
config_json = {"target": target}
if slack_channel:
recipient = ReportRecipients(
type=ReportRecipientType.SLACK,
recipient_config_json=json.dumps(config_json),
)
else:
recipient = ReportRecipients(
type=ReportRecipientType.EMAIL,
recipient_config_json=json.dumps(config_json),
)
report_schedule = insert_report_schedule(
type=report_type,
name=f"report",
crontab=f"0 9 * * *",
description=f"Daily report",
sql=sql,
chart=chart,
dashboard=dashboard,
database=database,
recipients=[recipient],
validator_type=validator_type,
validator_config_json=validator_config_json,
grace_period=grace_period,
)
return report_schedule
def cleanup_report_schedule(report_schedule: ReportSchedule) -> None:
db.session.query(ReportExecutionLog).filter(
ReportExecutionLog.report_schedule == report_schedule
).delete()
db.session.query(ReportRecipients).filter(
ReportRecipients.report_schedule == report_schedule
).delete()
db.session.delete(report_schedule)
db.session.commit()
@contextmanager
def create_test_table_context(database: Database):
database.get_sqla_engine().execute(
"CREATE TABLE test_table AS SELECT 1 as first, 2 as second"
)
database.get_sqla_engine().execute(
"INSERT INTO test_table (first, second) VALUES (1, 2)"
)
database.get_sqla_engine().execute(
"INSERT INTO test_table (first, second) VALUES (3, 4)"
)
yield db.session
database.get_sqla_engine().execute("DROP TABLE test_table")
@pytest.yield_fixture()
def create_report_email_chart():
with app.app_context():
chart = db.session.query(Slice).first()
report_schedule = create_report_notification(
email_target="[email protected]", chart=chart
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture()
def create_report_email_dashboard():
with app.app_context():
dashboard = db.session.query(Dashboard).first()
report_schedule = create_report_notification(
email_target="[email protected]", dashboard=dashboard
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture()
def create_report_slack_chart():
with app.app_context():
chart = db.session.query(Slice).first()
report_schedule = create_report_notification(
slack_channel="slack_channel", chart=chart
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture()
def create_report_slack_chart_working():
with app.app_context():
chart = db.session.query(Slice).first()
report_schedule = create_report_notification(
slack_channel="slack_channel", chart=chart
)
report_schedule.last_state = ReportState.WORKING
report_schedule.last_eval_dttm = datetime(2020, 1, 1, 0, 0)
db.session.commit()
log = ReportExecutionLog(
scheduled_dttm=report_schedule.last_eval_dttm,
start_dttm=report_schedule.last_eval_dttm,
end_dttm=report_schedule.last_eval_dttm,
state=ReportState.WORKING,
report_schedule=report_schedule,
uuid=uuid4(),
)
db.session.add(log)
db.session.commit()
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture()
def create_alert_slack_chart_success():
with app.app_context():
chart = db.session.query(Slice).first()
report_schedule = create_report_notification(
slack_channel="slack_channel",
chart=chart,
report_type=ReportScheduleType.ALERT,
)
report_schedule.last_state = ReportState.SUCCESS
report_schedule.last_eval_dttm = datetime(2020, 1, 1, 0, 0)
log = ReportExecutionLog(
report_schedule=report_schedule,
state=ReportState.SUCCESS,
start_dttm=report_schedule.last_eval_dttm,
end_dttm=report_schedule.last_eval_dttm,
scheduled_dttm=report_schedule.last_eval_dttm,
)
db.session.add(log)
db.session.commit()
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture()
def create_alert_slack_chart_grace():
with app.app_context():
chart = db.session.query(Slice).first()
report_schedule = create_report_notification(
slack_channel="slack_channel",
chart=chart,
report_type=ReportScheduleType.ALERT,
)
report_schedule.last_state = ReportState.GRACE
report_schedule.last_eval_dttm = datetime(2020, 1, 1, 0, 0)
log = ReportExecutionLog(
report_schedule=report_schedule,
state=ReportState.SUCCESS,
start_dttm=report_schedule.last_eval_dttm,
end_dttm=report_schedule.last_eval_dttm,
scheduled_dttm=report_schedule.last_eval_dttm,
)
db.session.add(log)
db.session.commit()
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture(
params=["alert1", "alert2", "alert3", "alert4", "alert5", "alert6", "alert7",]
)
def create_alert_email_chart(request):
param_config = {
"alert1": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": ">", "threshold": 9}',
},
"alert2": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": ">=", "threshold": 10}',
},
"alert3": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 11}',
},
"alert4": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<=", "threshold": 10}',
},
"alert5": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "!=", "threshold": 11}',
},
"alert6": {
"sql": "SELECT 'something' as metric",
"validator_type": ReportScheduleValidatorType.NOT_NULL,
"validator_config_json": "{}",
},
"alert7": {
"sql": "SELECT {{ 5 + 5 }} as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "!=", "threshold": 11}',
},
}
with app.app_context():
chart = db.session.query(Slice).first()
example_database = get_example_database()
with create_test_table_context(example_database):
report_schedule = create_report_notification(
email_target="[email protected]",
chart=chart,
report_type=ReportScheduleType.ALERT,
database=example_database,
sql=param_config[request.param]["sql"],
validator_type=param_config[request.param]["validator_type"],
validator_config_json=param_config[request.param][
"validator_config_json"
],
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture(
params=[
"alert1",
"alert2",
"alert3",
"alert4",
"alert5",
"alert6",
"alert7",
"alert8",
"alert9",
]
)
def create_no_alert_email_chart(request):
param_config = {
"alert1": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 10}',
},
"alert2": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": ">=", "threshold": 11}',
},
"alert3": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 10}',
},
"alert4": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<=", "threshold": 9}',
},
"alert5": {
"sql": "SELECT 10 as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "!=", "threshold": 10}',
},
"alert6": {
"sql": "SELECT first from test_table where 1=0",
"validator_type": ReportScheduleValidatorType.NOT_NULL,
"validator_config_json": "{}",
},
"alert7": {
"sql": "SELECT first from test_table where 1=0",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": ">", "threshold": 0}',
},
"alert8": {
"sql": "SELECT Null as metric",
"validator_type": ReportScheduleValidatorType.NOT_NULL,
"validator_config_json": "{}",
},
"alert9": {
"sql": "SELECT Null as metric",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": ">", "threshold": 0}',
},
}
with app.app_context():
chart = db.session.query(Slice).first()
example_database = get_example_database()
with create_test_table_context(example_database):
report_schedule = create_report_notification(
email_target="[email protected]",
chart=chart,
report_type=ReportScheduleType.ALERT,
database=example_database,
sql=param_config[request.param]["sql"],
validator_type=param_config[request.param]["validator_type"],
validator_config_json=param_config[request.param][
"validator_config_json"
],
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture(params=["alert1", "alert2"])
def create_mul_alert_email_chart(request):
param_config = {
"alert1": {
"sql": "SELECT first, second from test_table",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 10}',
},
"alert2": {
"sql": "SELECT first from test_table",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 10}',
},
}
with app.app_context():
chart = db.session.query(Slice).first()
example_database = get_example_database()
with create_test_table_context(example_database):
report_schedule = create_report_notification(
email_target="[email protected]",
chart=chart,
report_type=ReportScheduleType.ALERT,
database=example_database,
sql=param_config[request.param]["sql"],
validator_type=param_config[request.param]["validator_type"],
validator_config_json=param_config[request.param][
"validator_config_json"
],
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.yield_fixture(params=["alert1", "alert2"])
def create_invalid_sql_alert_email_chart(request):
param_config = {
"alert1": {
"sql": "SELECT 'string' ",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 10}',
},
"alert2": {
"sql": "SELECT first from foo_table",
"validator_type": ReportScheduleValidatorType.OPERATOR,
"validator_config_json": '{"op": "<", "threshold": 10}',
},
}
with app.app_context():
chart = db.session.query(Slice).first()
example_database = get_example_database()
with create_test_table_context(example_database):
report_schedule = create_report_notification(
email_target="[email protected]",
chart=chart,
report_type=ReportScheduleType.ALERT,
database=example_database,
sql=param_config[request.param]["sql"],
validator_type=param_config[request.param]["validator_type"],
validator_config_json=param_config[request.param][
"validator_config_json"
],
grace_period=60 * 60,
)
yield report_schedule
cleanup_report_schedule(report_schedule)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_report_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
def test_email_chart_report_schedule(
screenshot_mock, email_mock, create_report_email_chart
):
"""
ExecuteReport Command: Test chart email report schedule
"""
# setup screenshot mock
screenshot = read_fixture("sample.png")
screenshot_mock.return_value = screenshot
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_report_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(
create_report_email_chart
)
# assert that the link sent is correct
assert (
f'<a href="http://0.0.0.0:8080/superset/slice/'
f'{create_report_email_chart.chart.id}/">Explore in Superset</a>'
in email_mock.call_args[0][2]
)
# Assert the email smtp address
assert email_mock.call_args[0][0] == notification_targets[0]
# Assert the email inline screenshot
smtp_images = email_mock.call_args[1]["images"]
assert smtp_images[list(smtp_images.keys())[0]] == screenshot
# Assert logs are correct
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_report_email_dashboard"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.DashboardScreenshot.get_screenshot")
def test_email_dashboard_report_schedule(
screenshot_mock, email_mock, create_report_email_dashboard
):
"""
ExecuteReport Command: Test dashboard email report schedule
"""
# setup screenshot mock
screenshot = read_fixture("sample.png")
screenshot_mock.return_value = screenshot
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_report_email_dashboard.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(
create_report_email_dashboard
)
# Assert the email smtp address
assert email_mock.call_args[0][0] == notification_targets[0]
# Assert the email inline screenshot
smtp_images = email_mock.call_args[1]["images"]
assert smtp_images[list(smtp_images.keys())[0]] == screenshot
# Assert logs are correct
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_report_slack_chart"
)
@patch("superset.reports.notifications.slack.WebClient.files_upload")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
def test_slack_chart_report_schedule(
screenshot_mock, file_upload_mock, create_report_slack_chart
):
"""
ExecuteReport Command: Test chart slack report schedule
"""
# setup screenshot mock
screenshot = read_fixture("sample.png")
screenshot_mock.return_value = screenshot
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_report_slack_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(
create_report_slack_chart
)
assert file_upload_mock.call_args[1]["channels"] == notification_targets[0]
assert file_upload_mock.call_args[1]["file"] == screenshot
# Assert logs are correct
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures("create_report_slack_chart")
def test_report_schedule_not_found(create_report_slack_chart):
"""
ExecuteReport Command: Test report schedule not found
"""
max_id = db.session.query(func.max(ReportSchedule.id)).scalar()
with pytest.raises(ReportScheduleNotFoundError):
AsyncExecuteReportScheduleCommand(test_id, max_id + 1, datetime.utcnow()).run()
@pytest.mark.usefixtures("create_report_slack_chart_working")
def test_report_schedule_working(create_report_slack_chart_working):
"""
ExecuteReport Command: Test report schedule still working
"""
# setup screenshot mock
with freeze_time("2020-01-01T00:00:00Z"):
with pytest.raises(ReportSchedulePreviousWorkingError):
AsyncExecuteReportScheduleCommand(
test_id, create_report_slack_chart_working.id, datetime.utcnow()
).run()
assert_log(
ReportState.WORKING,
error_message=ReportSchedulePreviousWorkingError.message,
)
assert create_report_slack_chart_working.last_state == ReportState.WORKING
@pytest.mark.usefixtures("create_report_slack_chart_working")
def test_report_schedule_working_timeout(create_report_slack_chart_working):
"""
    ExecuteReport Command: Test report schedule still working but should time out
"""
current_time = create_report_slack_chart_working.last_eval_dttm + timedelta(
seconds=create_report_slack_chart_working.working_timeout + 1
)
with freeze_time(current_time):
with pytest.raises(ReportScheduleWorkingTimeoutError):
AsyncExecuteReportScheduleCommand(
test_id, create_report_slack_chart_working.id, datetime.utcnow()
).run()
# Only needed for MySQL, understand why
db.session.commit()
logs = db.session.query(ReportExecutionLog).all()
# Two logs, first is created by fixture
assert len(logs) == 2
assert logs[1].error_message == ReportScheduleWorkingTimeoutError.message
assert logs[1].state == ReportState.ERROR
assert create_report_slack_chart_working.last_state == ReportState.ERROR
@pytest.mark.usefixtures("create_alert_slack_chart_success")
def test_report_schedule_success_grace(create_alert_slack_chart_success):
"""
ExecuteReport Command: Test report schedule on success to grace
"""
# set current time to within the grace period
current_time = create_alert_slack_chart_success.last_eval_dttm + timedelta(
seconds=create_alert_slack_chart_success.grace_period - 10
)
with freeze_time(current_time):
AsyncExecuteReportScheduleCommand(
test_id, create_alert_slack_chart_success.id, datetime.utcnow()
).run()
db.session.commit()
assert create_alert_slack_chart_success.last_state == ReportState.GRACE
@pytest.mark.usefixtures("create_alert_slack_chart_grace")
def test_report_schedule_success_grace_end(create_alert_slack_chart_grace):
"""
ExecuteReport Command: Test report schedule on grace to noop
"""
    # set current time past the end of the grace period
current_time = create_alert_slack_chart_grace.last_eval_dttm + timedelta(
seconds=create_alert_slack_chart_grace.grace_period + 1
)
with freeze_time(current_time):
AsyncExecuteReportScheduleCommand(
test_id, create_alert_slack_chart_grace.id, datetime.utcnow()
).run()
db.session.commit()
assert create_alert_slack_chart_grace.last_state == ReportState.NOOP
@pytest.mark.usefixtures("create_alert_email_chart")
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
def test_alert_limit_is_applied(screenshot_mock, email_mock, create_alert_email_chart):
"""
ExecuteReport Command: Test that all alerts apply a SQL limit to stmts
"""
with patch.object(
create_alert_email_chart.database.db_engine_spec, "execute", return_value=None
) as execute_mock:
with patch.object(
create_alert_email_chart.database.db_engine_spec,
"fetch_data",
return_value=None,
) as fetch_data_mock:
AsyncExecuteReportScheduleCommand(
test_id, create_alert_email_chart.id, datetime.utcnow()
).run()
assert "LIMIT 2" in execute_mock.call_args[0][1]
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_report_email_dashboard"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.DashboardScreenshot.get_screenshot")
def test_email_dashboard_report_fails(
screenshot_mock, email_mock, create_report_email_dashboard
):
"""
ExecuteReport Command: Test dashboard email report schedule notification fails
"""
# setup screenshot mock
from smtplib import SMTPException
screenshot = read_fixture("sample.png")
screenshot_mock.return_value = screenshot
email_mock.side_effect = SMTPException("Could not connect to SMTP XPTO")
with pytest.raises(ReportScheduleNotificationError):
AsyncExecuteReportScheduleCommand(
test_id, create_report_email_dashboard.id, datetime.utcnow()
).run()
assert_log(ReportState.ERROR, error_message="Could not connect to SMTP XPTO")
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_alert_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
ALERTS_ATTACH_REPORTS=True,
)
def test_slack_chart_alert(screenshot_mock, email_mock, create_alert_email_chart):
"""
    ExecuteReport Command: Test chart email alert
"""
# setup screenshot mock
screenshot = read_fixture("sample.png")
screenshot_mock.return_value = screenshot
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_alert_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(create_alert_email_chart)
# Assert the email smtp address
assert email_mock.call_args[0][0] == notification_targets[0]
# Assert the email inline screenshot
smtp_images = email_mock.call_args[1]["images"]
assert smtp_images[list(smtp_images.keys())[0]] == screenshot
# Assert logs are correct
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_alert_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
ALERTS_ATTACH_REPORTS=False,
)
def test_slack_chart_alert_no_attachment(email_mock, create_alert_email_chart):
"""
    ExecuteReport Command: Test chart email alert with no screenshot attached
"""
# setup screenshot mock
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_alert_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(create_alert_email_chart)
# Assert the email smtp address
assert email_mock.call_args[0][0] == notification_targets[0]
# Assert the there is no attached image
assert email_mock.call_args[1]["images"] is None
# Assert logs are correct
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_report_slack_chart"
)
@patch("superset.reports.notifications.slack.WebClient")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
def test_slack_token_callable_chart_report(
screenshot_mock, slack_client_mock_class, create_report_slack_chart
):
"""
    ExecuteReport Command: Test chart slack report (slack token callable)
"""
slack_client_mock_class.return_value = Mock()
app.config["SLACK_API_TOKEN"] = Mock(return_value="cool_code")
# setup screenshot mock
screenshot = read_fixture("sample.png")
screenshot_mock.return_value = screenshot
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_report_slack_chart.id, datetime.utcnow()
).run()
app.config["SLACK_API_TOKEN"].assert_called_once()
assert slack_client_mock_class.called_with(token="cool_code", proxy="")
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures("create_no_alert_email_chart")
def test_email_chart_no_alert(create_no_alert_email_chart):
"""
ExecuteReport Command: Test chart email no alert
"""
with freeze_time("2020-01-01T00:00:00Z"):
AsyncExecuteReportScheduleCommand(
test_id, create_no_alert_email_chart.id, datetime.utcnow()
).run()
assert_log(ReportState.NOOP)
@pytest.mark.usefixtures("create_mul_alert_email_chart")
def test_email_mul_alert(create_mul_alert_email_chart):
"""
ExecuteReport Command: Test chart email multiple rows
"""
with freeze_time("2020-01-01T00:00:00Z"):
with pytest.raises(
(AlertQueryMultipleRowsError, AlertQueryMultipleColumnsError)
):
AsyncExecuteReportScheduleCommand(
test_id, create_mul_alert_email_chart.id, datetime.utcnow()
).run()
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_alert_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
def test_soft_timeout_alert(email_mock, create_alert_email_chart):
"""
ExecuteReport Command: Test soft timeout on alert queries
"""
from celery.exceptions import SoftTimeLimitExceeded
from superset.reports.commands.exceptions import AlertQueryTimeout
with patch.object(
create_alert_email_chart.database.db_engine_spec, "execute", return_value=None
) as execute_mock:
execute_mock.side_effect = SoftTimeLimitExceeded()
with pytest.raises(AlertQueryTimeout):
AsyncExecuteReportScheduleCommand(
test_id, create_alert_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(create_alert_email_chart)
# Assert the email smtp address, asserts a notification was sent with the error
assert email_mock.call_args[0][0] == notification_targets[0]
assert_log(
ReportState.ERROR, error_message="A timeout occurred while executing the query."
)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_alert_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
ALERTS_ATTACH_REPORTS=True,
)
def test_soft_timeout_screenshot(screenshot_mock, email_mock, create_alert_email_chart):
"""
ExecuteReport Command: Test soft timeout on screenshot
"""
from celery.exceptions import SoftTimeLimitExceeded
from superset.reports.commands.exceptions import AlertQueryTimeout
screenshot_mock.side_effect = SoftTimeLimitExceeded()
with pytest.raises(ReportScheduleScreenshotTimeout):
AsyncExecuteReportScheduleCommand(
test_id, create_alert_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(create_alert_email_chart)
# Assert the email smtp address, asserts a notification was sent with the error
assert email_mock.call_args[0][0] == notification_targets[0]
assert_log(
ReportState.ERROR, error_message="A timeout occurred while taking a screenshot."
)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_report_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
def test_fail_screenshot(screenshot_mock, email_mock, create_report_email_chart):
"""
    ExecuteReport Command: Test a failed screenshot
"""
from celery.exceptions import SoftTimeLimitExceeded
from superset.reports.commands.exceptions import AlertQueryTimeout
screenshot_mock.side_effect = Exception("Unexpected error")
with pytest.raises(ReportScheduleScreenshotFailedError):
AsyncExecuteReportScheduleCommand(
test_id, create_report_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(create_report_email_chart)
# Assert the email smtp address, asserts a notification was sent with the error
assert email_mock.call_args[0][0] == notification_targets[0]
assert_log(
ReportState.ERROR, error_message="Failed taking a screenshot Unexpected error"
)
@pytest.mark.usefixtures(
"load_birth_names_dashboard_with_slices", "create_alert_email_chart"
)
@patch("superset.reports.notifications.email.send_email_smtp")
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
ALERTS_ATTACH_REPORTS=False,
)
def test_email_disable_screenshot(email_mock, create_alert_email_chart):
"""
    ExecuteReport Command: Test email alert with screenshot attachments disabled
"""
AsyncExecuteReportScheduleCommand(
test_id, create_alert_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(create_alert_email_chart)
# Assert the email smtp address, asserts a notification was sent with the error
assert email_mock.call_args[0][0] == notification_targets[0]
# Assert the there is no attached image
assert email_mock.call_args[1]["images"] is None
assert_log(ReportState.SUCCESS)
@pytest.mark.usefixtures("create_invalid_sql_alert_email_chart")
@patch("superset.reports.notifications.email.send_email_smtp")
def test_invalid_sql_alert(email_mock, create_invalid_sql_alert_email_chart):
"""
ExecuteReport Command: Test alert with invalid SQL statements
"""
with freeze_time("2020-01-01T00:00:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
notification_targets = get_target_from_report_schedule(
create_invalid_sql_alert_email_chart
)
# Assert the email smtp address, asserts a notification was sent with the error
assert email_mock.call_args[0][0] == notification_targets[0]
@pytest.mark.usefixtures("create_invalid_sql_alert_email_chart")
@patch("superset.reports.notifications.email.send_email_smtp")
def test_grace_period_error(email_mock, create_invalid_sql_alert_email_chart):
"""
ExecuteReport Command: Test alert grace period on error
"""
with freeze_time("2020-01-01T00:00:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
# Only needed for MySQL, understand why
db.session.commit()
notification_targets = get_target_from_report_schedule(
create_invalid_sql_alert_email_chart
)
# Assert the email smtp address, asserts a notification was sent with the error
assert email_mock.call_args[0][0] == notification_targets[0]
assert (
get_notification_error_sent_count(create_invalid_sql_alert_email_chart) == 1
)
with freeze_time("2020-01-01T00:30:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
db.session.commit()
assert (
get_notification_error_sent_count(create_invalid_sql_alert_email_chart) == 1
)
# Grace period ends, assert a notification was sent
with freeze_time("2020-01-01T01:30:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
db.session.commit()
assert (
get_notification_error_sent_count(create_invalid_sql_alert_email_chart) == 2
)
@pytest.mark.usefixtures("create_invalid_sql_alert_email_chart")
@patch("superset.reports.notifications.email.send_email_smtp")
@patch("superset.utils.screenshots.ChartScreenshot.get_screenshot")
def test_grace_period_error_flap(
screenshot_mock, email_mock, create_invalid_sql_alert_email_chart
):
"""
    ExecuteReport Command: Test alert grace period when the error condition flaps (fails, recovers, fails again)
"""
with freeze_time("2020-01-01T00:00:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
db.session.commit()
# Assert we have 1 notification sent on the log
assert (
get_notification_error_sent_count(create_invalid_sql_alert_email_chart) == 1
)
with freeze_time("2020-01-01T00:30:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
db.session.commit()
assert (
get_notification_error_sent_count(create_invalid_sql_alert_email_chart) == 1
)
# Change report_schedule to valid
create_invalid_sql_alert_email_chart.sql = "SELECT 1 AS metric"
create_invalid_sql_alert_email_chart.grace_period = 0
db.session.merge(create_invalid_sql_alert_email_chart)
db.session.commit()
with freeze_time("2020-01-01T00:31:00Z"):
# One success
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
# Grace period ends
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
db.session.commit()
create_invalid_sql_alert_email_chart.sql = "SELECT 'first'"
create_invalid_sql_alert_email_chart.grace_period = 10
db.session.merge(create_invalid_sql_alert_email_chart)
db.session.commit()
# assert that after a success, when back to error we send the error notification
# again
with freeze_time("2020-01-01T00:32:00Z"):
with pytest.raises((AlertQueryError, AlertQueryInvalidTypeError)):
AsyncExecuteReportScheduleCommand(
test_id, create_invalid_sql_alert_email_chart.id, datetime.utcnow()
).run()
db.session.commit()
assert (
get_notification_error_sent_count(create_invalid_sql_alert_email_chart) == 2
)
|
py | 7dfb70dae57890958dda608951963a015e9d06d9 | """
No: 8
Date: 11-11-2020
Problem:
Two Sum
Given an array of integers, return whether or not two numbers sum to a given
target, k.
Note: you may not sum a number with itself.
TestCases:
[1, 3, 8, 2], k = 10, return true (8 + 2)
[3, 9, 13, 7], k = 8, return false
[4, 2, 6, 5, 2], k = 4, return true (2 + 2)
Time Complexity:
O(n) - length of array
Space Complexity:
O(n) - length of array
"""
def TwoSum(arr, k) -> bool:
    seen = {}  # maps expected complement -> number already seen
    for num in arr:
        if num not in seen:
            # remember the value that would complete the sum with num
            seen[k - num] = num
        else:
            # num is the complement of an earlier element
            return True
    return False
tests = [
[[1, 3, 8, 2], 10, True],
[[3, 9, 13, 7], 8, False],
[[4, 2, 6, 5, 2], 4, True],
]
for test in tests:
assert TwoSum(test[0], test[1]) == test[2]
|
py | 7dfb70f6ab21a60f777ea47a7c8173b3e958a597 | from datetime import datetime
from flask import (Flask, redirect, render_template, request, make_response,
jsonify, url_for)
from flask_sqlalchemy import SQLAlchemy
from temperature_client import TemperatureClient
###
# Configuration
###
# Temperature server (see pi/temp-server/server.py)
TEMP_SERVER_HOST = '127.0.0.1'
TEMP_SERVER_PORT = 8888
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///roasts.db'
db = SQLAlchemy(app)
temperature_client = TemperatureClient(TEMP_SERVER_HOST, TEMP_SERVER_PORT)
###
# Models
###
class Roast(db.Model):
id = db.Column(db.Integer, primary_key=True)
beans = db.Column(db.String(100))
weight = db.Column(db.Integer)
roaster = db.Column(db.String(50))
duration = db.Column(db.Numeric(5, 2))
start_at = db.Column(db.DateTime(True))
end_at = db.Column(db.DateTime(True))
first_crack = db.Column(db.DateTime(True))
second_crack = db.Column(db.DateTime(True))
synced = db.Column(db.Boolean)
def __init__(self, beans, weight, roaster, duration):
self.beans = beans
self.weight = weight
self.roaster = roaster
self.duration = duration
self.start_at = datetime.utcnow()
self.synced = False
class TemperatureReading(db.Model):
id = db.Column(db.Integer, primary_key=True)
farenheight = db.Column(db.Numeric(6, 2))
log_date = db.Column(db.DateTime(True))
roast_id = db.Column(db.Integer, db.ForeignKey('roast.id'))
roast = db.relationship('Roast',
backref=db.backref('temperature_readings', lazy='dynamic'))
def __init__(self, roast_id, farenheight):
self.roast_id = roast_id
self.farenheight = farenheight
@app.route('/')
def new_roast():
"""Prompt for beans, roaster's name, and weight"""
return render_template('new_roast.html')
@app.route('/', methods=['POST'])
def create_roast():
form = request.form
roast = Roast(form.get('beans'), form.get('weight'), form.get('roaster'),
form.get('duration'))
# Add the Roast to the database
db.session.add(roast)
db.session.commit()
# Redirect to the current roast
return redirect(url_for('.current_roast'))
@app.route('/current')
def current_roast():
roast = Roast.query.filter(Roast.end_at == None).first()
if roast == None:
return redirect(url_for('.new_roast'))
return render_template('current.html', roast=roast)
@app.route('/current-temperature')
def current_temperature():
reading = temperature_client.get()
return make_response(jsonify(reading))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
|
py | 7dfb714a705e0c03e58a931e35e2d7474b1aa686 | import evdev
devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]
for device in devices:
print(device.fn, device.name, device.phys)
print(device.capabilities(verbose=True))
print('')
|
py | 7dfb71d4b41fe6aa8af403dc91605e601423ce89 | # Python script for PythonHelper unit test
# Print Hello World + input arguments
import sys
if __name__ == '__main__':
result = "Hello World!"
parameters = sys.argv[1:]
    for p in parameters:
result = result + " " + p
print(result) |
py | 7dfb73193450467bd3e85da32205b5c780ecb3e2 | print '''
######################################################################################################
# ________ ________ __ __ _______ ______ ________ ________ ______ __ __ #
#| \| \| \ | \| \ / \ | \| \ / \ | \ / \ #
# \$$$$$$$$| $$$$$$$$| $$\ | $$| $$$$$$$\| $$$$$$\ \$$$$$$$$| $$$$$$$$| $$$$$$\| $$\ / $$ #
# | $$ | $$__ | $$$\| $$| $$ | $$| $$___\$$ | $$ | $$__ | $$__| $$| $$$\ / $$$ #
# | $$ | $$ \ | $$$$\ $$| $$ | $$ \$$ \ | $$ | $$ \ | $$ $$| $$$$\ $$$$ #
# | $$ | $$$$$ | $$\$$ $$| $$ | $$ _\$$$$$$\ | $$ | $$$$$ | $$$$$$$$| $$\$$ $$ $$ #
# | $$ | $$_____ | $$ \$$$$| $$__/ $$| \__| $$ | $$ | $$_____ | $$ | $$| $$ \$$$| $$ #
# | $$ | $$ \| $$ \$$$| $$ $$ \$$ $$ | $$ | $$ \| $$ | $$| $$ \$ | $$ #
# \$$ \$$$$$$$$ \$$ \$$ \$$$$$$$ \$$$$$$ \$$ \$$$$$$$$ \$$ \$$ \$$ \$$ #
# #
# Coded by : Ismael Al-safadi * photo forensic * #
######################################################################################################
'''
import sys
choice = raw_input("Select your choice \npress [1] to extract metadata. \npress [2] to delete metadata.\n[-]write (exit) to leave.\n>> ")
if choice =="1":
from PIL import Image
from PIL.ExifTags import TAGS
path=raw_input("enter the path of image:")
dict_dict = {}
try:
i = Image.open(path)
info = i._getexif()
for tag, value in (info.items()):
decoded = TAGS.get(tag, tag)
dict_dict[decoded] = value
type_of_phone= dict_dict['Make']
flash=dict_dict['Flash']
GPSInfo=dict_dict['GPSInfo']
DateTimeOriginal=dict_dict['DateTimeOriginal']
Software=dict_dict['Software']
Model=dict_dict['Model']
print "[+]The type of camera or Phone :"+type_of_phone
if flash!=0:
print "[+]Flash is open"
else:
print "[+]Flash is not open"
print"[+]GPSInfo:"+str(GPSInfo)
print "[+]DateTimeOriginal:"+DateTimeOriginal
print "[+]Software:"+Software
print "[+]Model:"+Model
except:
        print "\n [-] Oops!! sorry we can't find anything!"
elif choice =="2":
import piexif
path=raw_input("enter the path of image:")
data = piexif.load(path)
piexif.remove(path)
empty = piexif.load(path)
print "\n [+] Done ^__^ "
elif choice == "exit":
sys.exit()
else :
print " Wrong choice \n "
|
py | 7dfb732eac233c7a428c06ff516eae92d768f3e6 | # encoding: utf-8
from django.core.cache import cache
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib import messages
from seahub.avatar.forms import PrimaryAvatarForm, DeleteAvatarForm, UploadAvatarForm,\
GroupAvatarForm
from seahub.avatar.models import Avatar, GroupAvatar
from seahub.avatar.settings import AVATAR_MAX_AVATARS_PER_USER, AVATAR_DEFAULT_SIZE
from seahub.avatar.signals import avatar_updated
from seahub.avatar.util import get_primary_avatar, get_default_avatar_url, \
invalidate_cache, invalidate_group_cache
from seahub.utils import render_error, render_permission_error, \
check_and_get_org_by_group
from seahub.auth.decorators import login_required
from seaserv import ccnet_threaded_rpc, check_group_staff
def _get_next(request):
"""
The part that's the least straightforward about views in this module is how they
determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the following order:
1. If there is a variable named ``next`` in the *POST* parameters, the view will
redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters, the view will
redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers, the view will
redirect to that previous page.
"""
next = request.POST.get('next', request.GET.get('next',
request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next
def _get_avatars(user):
# Default set. Needs to be sliced, but that's it. Keep the natural order.
avatars = Avatar.objects.filter(emailuser=user.email)
# Current avatar
primary_avatar = avatars.order_by('-primary')[:1]
if primary_avatar:
avatar = primary_avatar[0]
else:
avatar = None
if AVATAR_MAX_AVATARS_PER_USER == 1:
avatars = primary_avatar
else:
# Slice the default set now that we used the queryset for the primary avatar
avatars = avatars[:AVATAR_MAX_AVATARS_PER_USER]
return (avatar, avatars)
@login_required
def add(request, extra_context=None, next_override=None,
upload_form=UploadAvatarForm, *args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
upload_avatar_form = upload_form(request.POST or None,
request.FILES or None, user=request.user)
if request.method == "POST" and 'avatar' in request.FILES:
if upload_avatar_form.is_valid():
avatar = Avatar(
emailuser = request.user.username,
primary = True,
)
image_file = request.FILES['avatar']
avatar.avatar.save(image_file.name, image_file)
avatar.save()
messages.success(request, _("Successfully uploaded a new avatar."))
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
return HttpResponseRedirect(next_override or _get_next(request))
else:
messages.error(request, upload_avatar_form.errors['avatar'])
return HttpResponseRedirect(_get_next(request))
else:
# Only allow post request to change avatar.
raise Http404
# return render_to_response(
# 'avatar/add.html',
# extra_context,
# context_instance = RequestContext(
# request,
# { 'avatar': avatar,
# 'avatars': avatars,
# 'upload_avatar_form': upload_avatar_form,
# 'next': next_override or _get_next(request), }
# )
# )
@login_required
def group_add(request, gid):
group_id_int = int(gid) # Checked by URL Conf
if not check_group_staff(group_id_int, request.user.username):
raise Http404
group = ccnet_threaded_rpc.get_group(group_id_int)
if not group:
return HttpResponseRedirect(reverse('group_list', args=[]))
    # change navigator when user is in a different context
org, base_template = check_and_get_org_by_group(group_id_int,
request.user.username)
form = GroupAvatarForm(request.POST or None, request.FILES or None)
if request.method == 'POST' and 'avatar' in request.FILES:
if form.is_valid():
image_file = request.FILES['avatar']
avatar = GroupAvatar()
avatar.group_id = gid
avatar.avatar.save(image_file.name, image_file)
avatar.save()
# invalidate group avatar cache
invalidate_group_cache(gid)
messages.success(request, _("Successfully uploaded a new group avatar."))
else:
messages.error(request, form.errors['avatar'])
return HttpResponseRedirect(_get_next(request))
else:
# Only allow post request to change group avatar.
raise Http404
# return render_to_response('avatar/set_avatar.html', {
# 'group' : group,
# 'form' : form,
# 'org': org,
# 'base_template': base_template,
# }, context_instance=RequestContext(request))
@login_required
def change(request, extra_context=None, next_override=None,
upload_form=UploadAvatarForm, primary_form=PrimaryAvatarForm,
*args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
if avatar:
kwargs = {'initial': {'choice': avatar.id}}
else:
kwargs = {}
upload_avatar_form = upload_form(user=request.user, **kwargs)
primary_avatar_form = primary_form(request.POST or None,
user=request.user, avatars=avatars, **kwargs)
if request.method == "POST":
updated = False
if 'choice' in request.POST and primary_avatar_form.is_valid():
avatar = Avatar.objects.get(id=
primary_avatar_form.cleaned_data['choice'])
avatar.primary = True
avatar.save()
updated = True
messages.success(request, _("Successfully updated your avatar."))
if updated:
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
return HttpResponseRedirect(next_override or _get_next(request))
return render_to_response(
'avatar/change.html',
extra_context,
context_instance = RequestContext(
request,
{ 'avatar': avatar,
'avatars': avatars,
'upload_avatar_form': upload_avatar_form,
'primary_avatar_form': primary_avatar_form,
'next': next_override or _get_next(request), }
)
)
@login_required
def delete(request, extra_context=None, next_override=None, *args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
delete_avatar_form = DeleteAvatarForm(request.POST or None,
user=request.user, avatars=avatars)
if request.method == 'POST':
if delete_avatar_form.is_valid():
ids = delete_avatar_form.cleaned_data['choices']
if unicode(avatar.id) in ids and avatars.count() > len(ids):
# Find the next best avatar, and set it as the new primary
for a in avatars:
if unicode(a.id) not in ids:
a.primary = True
a.save()
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
break
# NOTE: `Avatar.objects.filter(id__in=ids).delete()` will NOT work
            # correctly, since delete() on a QuerySet will not call the delete
            # method on each avatar object.
for a in Avatar.objects.filter(id__in=ids):
a.delete()
messages.success(request, _("Successfully deleted the requested avatars."))
return HttpResponseRedirect(next_override or _get_next(request))
return render_to_response(
'avatar/confirm_delete.html',
extra_context,
context_instance = RequestContext(
request,
{ 'avatar': avatar,
'avatars': avatars,
'delete_avatar_form': delete_avatar_form,
'next': next_override or _get_next(request), }
)
)
def render_primary(request, extra_context={}, user=None, size=AVATAR_DEFAULT_SIZE, *args, **kwargs):
size = int(size)
avatar = get_primary_avatar(user, size=size)
if avatar:
# FIXME: later, add an option to render the resized avatar dynamically
# instead of redirecting to an already created static file. This could
# be useful in certain situations, particulary if there is a CDN and
# we want to minimize the storage usage on our static server, letting
# the CDN store those files instead
return HttpResponseRedirect(avatar.avatar_url(size))
else:
url = get_default_avatar_url()
return HttpResponseRedirect(url)
|
py | 7dfb73fc0ee1179665ae8627593030d05622f1ab | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Useful utilities for tests."""
import functools
import os
import time
import uuid
from testtools import testcase
TZ = None
def timezone(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
tz_original = os.environ.get('TZ')
try:
if TZ:
os.environ['TZ'] = TZ
time.tzset()
return func(*args, **kwargs)
finally:
if TZ:
if tz_original:
os.environ['TZ'] = tz_original
else:
if 'TZ' in os.environ:
del os.environ['TZ']
time.tzset()
return wrapper
def new_uuid():
"""Return a string UUID."""
return uuid.uuid4().hex
def wip(message, expected_exception=Exception, bug=None):
"""Mark a test as work in progress.
Based on code by Nat Pryce:
https://gist.github.com/npryce/997195#file-wip-py
The test will always be run. If the test fails then a TestSkipped
exception is raised. If the test passes an AssertionError exception
is raised so that the developer knows they made the test pass. This
is a reminder to remove the decorator.
:param message: a string message to help clarify why the test is
marked as a work in progress
:param expected_exception: an exception class that will be checked for
when @wip verifies an exception is raised. The
test will fail if a different exception is
raised. Default is "any" exception is valid
:param bug: (optional) a string for tracking the bug and what bug should
cause the @wip decorator to be removed from the testcase
Usage:
>>> @wip('Expected Error', expected_exception=Exception, bug="#000000")
      ... def test():
      ...     pass
"""
if bug:
bugstr = " (BugID " + bug + ")"
else:
bugstr = ""
def _wip(f):
@functools.wraps(f)
def run_test(*args, **kwargs):
__e = None
try:
f(*args, **kwargs)
except Exception as __e: # noqa F841
if (expected_exception != Exception and
not isinstance(__e, expected_exception)):
raise AssertionError(
'Work In Progress Test Failed%(bugstr)s with '
'unexpected exception. Expected "%(expected)s" '
'got "%(exception)s": %(message)s ' %
{'message': message, 'bugstr': bugstr,
'expected': expected_exception.__class__.__name__,
'exception': __e.__class__.__name__})
# NOTE(notmorgan): We got the expected exception we can safely
# skip this test.
raise testcase.TestSkipped(
'Work In Progress Test Failed as '
'expected%(bugstr)s: %(message)s' %
{'message': message, 'bugstr': bugstr})
raise AssertionError('Work In Progress Test Passed%(bugstr)s: '
'%(message)s' % {'message': message,
'bugstr': bugstr})
return run_test
return _wip
|
py | 7dfb748a88fcaf11df8f7b79ce9cc5ffc02b81e4 | import boto3
from botocore.exceptions import ClientError
import logging
import config
import os
import csv
def upload_file(local_file, object_name=None):
"""Upload a file to the S3 bucket for this project
:param local_file: Path to local file to upload
:param object_name: Name under which to store the file in the bucket
:return: True if file is uploaded, else False
"""
    if object_name is None:
object_name = local_file
try:
s3 = boto3.client('s3', aws_access_key_id=config.ACCESS_KEY,
aws_secret_access_key=config.SECRET_KEY)
s3.upload_file(
local_file, config.BUCKET, object_name
)
except ClientError as e:
logging.error(e)
return False
return True
def download_file(object_name, location = None):
"""Download a file from the S3 bucket for this project
:param object_name: Name under which the file is stored in the bucket
:param location: Local path to which the file is downloaded
:return: True if file is downloaded, else False
"""
try:
s3 = boto3.client('s3', aws_access_key_id=config.ACCESS_KEY,
aws_secret_access_key=config.SECRET_KEY)
        if location is None:
location = "./" + object_name
s3.download_file(
config.BUCKET, object_name, location
)
except ClientError as e:
logging.error(e)
return False
return True
# upload data
uploaded_cancers = []
for cancer in os.listdir("./data"):
if (cancer == ".DS_Store"): continue
try:
upload_file(f"./data/{cancer}/data.csv", f"{cancer}_data.csv")
upload_file(f"./data/{cancer}/clinical_data.csv", f"{cancer}_clinical_data.csv")
uploaded_cancers.append(cancer)
print(cancer)
except FileNotFoundError as e:
print(e)
continue
# save a .csv with cancer names
with open("./uploaded_cancers", 'w', newline='') as out_file:
wr = csv.writer(out_file, quoting=csv.QUOTE_ALL)
wr.writerow(uploaded_cancers)
# upload_file("./data.csv", "data.csv")
# upload_file("./clinical_data.csv", "clinical_data.csv")
|
py | 7dfb75a7b10833d07a7df7feca72bb0a199dea25 | #python exceptions let you deal with unexpected results
#unexpected results
try:
print(a) #this will throw an exception
except:
print("a is not defined!")
#there are specific errors in python
try:
print(a) #this will throw a NameError
except NameError:
print("a is still not defined!")
except:
print("Something else went wrong!")
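#a minimal extra illustration: try/except also supports else and finally
try:
    b = 1 + 1 #no exception is raised here
except NameError:
    print("b is not defined!")
else:
    print("no error happened, b = " + str(b))
finally:
    print("this line always runs, error or not")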
#this will break our system
#since a is not defined
print(a) |
py | 7dfb7669e1be40ccbdc80d5b2f59db87dac57f38 | #! /usr/bin/env python
import os
import sys
from lib.common import list_file_paths
import pickle
import random
to_skip_pickle = "evade_both_to_skip.pickle"
if os.path.isfile(to_skip_pickle):
to_skip = pickle.load(open(to_skip_pickle))
else:
to_skip = ['1ec657f52bf1811af14d7da549cb6add70c778f0', 'b01be494ac00843796cb200caf91e7ab2e997c34', 'b4f13bf5f4174fd7a7c2a52b21309da8da0b33ce', 'f2a9170030b999834018203f468ea9bcf8e444c0', 'f3efb335a617ecb76e1e7519bc7c2c3df8fa47f6']
def main(argv):
# robustmlp
classifier_name = sys.argv[1]
ext_genome_folder = sys.argv[2]
ext_genome_tag = ext_genome_folder.split('/')[-1]
pop = sys.argv[3]
gen = sys.argv[4]
mutation_rate = sys.argv[5]
round_id = int(sys.argv[6])
token = sys.argv[7]
start = int(sys.argv[8])
if not os.path.isdir(ext_genome_folder):
print "Error: invalid ext genome folder."
sys.exit(1)
seed_paths = pickle.load(open('shuffled_seed_paths_most_benign.pickle', 'rb'))
for seed_path in seed_paths[start:]:
start_hash = seed_path.split('/')[-1].split('.')[0]
if start_hash in to_skip:
print "Skipped ", start_hash
continue
cmd = "./gp_1_replace_mix.py -c %s -s %s -e %s -p %s -g %s -m %s -x 0 -f 0 -t %s --round %d" \
% (classifier_name, seed_path, ext_genome_folder, pop, gen, mutation_rate, token, round_id)
try:
print cmd
os.system(cmd)
except KeyboardInterrupt, error:
break
if __name__ == '__main__':
main(sys.argv)
|
py | 7dfb769eb03d5be318cb102a630728947e956816 | import numpy as np
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from ..models.profile import Profile
from ..interfaces.helper import Helper
from ..interfaces.glove import GloVe
from .noGloveValueError import NoGloveValueError
class Features:
"""
Contains all pipeline functions for both LIWC and glove.
"""
def __init__(
self,
):
return
def featureLIWC(
self,
profileCol,
):
"""
Extract LIWC features (namely LIWC categories) from
each profile in list as feature.
Parameters
----------
profileCol : list, default=None, required
List with profiles to generate features for.
Returns
-------
np.array(outputList) : numpy.array
Generated features in numpy format.
"""
# will contain the LIWC measures for each profile
outputList = []
# loop over profileCollection
for profile in profileCol:
# create row
liwc_data = []
# get names of liwc categories
for attrName in Profile.liwc_category_list:
# get value of current category
attr = getattr(profile, attrName)
# append to current profile
# and convert to float
                liwc_data.append(float(attr))
outputList.append(liwc_data)
# create numpy array, as scikit needs this format
return np.array(outputList)
def createLIWCFeaturePipeline(
self,
):
"""
        Create a pipeline that can be passed into multiple training processes.
        This is just a blueprint for calculating the features;
        no features are calculated yet!
Returns
-------
featurePipeline : Pipeline
Pipeline containing feature generation and scaling.
"""
# Create skicit-learn compatible FunctionTransformers
# for usage with other sklearn functions
# featureLIWC is the name of the function to be called to
# extract features
liwc_Trans = FunctionTransformer(self.featureLIWC, validate=False)
# Combine feature(s) with FeatureUnion
featureTransformer = FeatureUnion([
('liwc', liwc_Trans),
], n_jobs=-1) # parallelize via multiprocess
# combine into a pipeline including scaling
featurePipeline = Pipeline([
('features', featureTransformer),
("stdScaler", StandardScaler())
])
return featurePipeline
def _condenseGloVeVectors(
self,
vectorList,
):
"""
        For each user a vectorList of varying length is passed in.
        It is condensed into a single 900-dim vector.
"""
# convert to np array for mean,max,min functions
vectorList = np.array(vectorList)
# correct structure from (1,x,300) to (x,300)
vectorList = vectorList[0]
# for each dimension identify mean,max,min
# and save in separate vector
meanVector = vectorList.mean(axis=0)
maxVector = np.amax(a=vectorList, axis=0)
minVector = np.amin(a=vectorList, axis=0)
# combine all 300 dim vectors in 900 dim vector
returnVector = []
returnVector.extend(meanVector)
returnVector.extend(maxVector)
returnVector.extend(minVector)
# convert to numpy array for scikit
returnVector = np.array(returnVector)
return returnVector
def featureGloVe(
self,
profileList,
):
"""
For each profile in profile list generate GloVe features.
Each profile contains text and for this text the glove vectors
are retrieved and condensed into one single vector for this user.
All user vectors are appended into the outputList.
The word coverageStatistics and wordCounts for each user
are saved in this feature object instance to be retrieved later.
Parameters
----------
profileList : list, default=None, required
List containing relevant profiles for which to extract features.
Returns
-------
np.array(outputList) : numpy.array
Features in correct output format.
"""
if self.glove is None:
raise Exception("GloVe not loaded.")
# will contain the GloVe measures for each profile
outputList = []
# get index as list, for faster lookup
index_as_list = self.glove.get_index_list()
# initialize progress bar
helper = Helper()
numProfiles = len(profileList)
helper.printProgressBar(
0,
numProfiles,
prefix='Progress:',
suffix='Complete',
length=50
)
# list for saving coverage statistics
coverageStatistics = []
# word count, that are included, for profiles
wordCounts = []
# loop over profileList
for num, profile in enumerate(profileList):
# tokenize text in tweets
# separated by space
tokens = profile.text.split(' ')
profile_vectors = []
# for each word lookup glove vector
# if no match -> ignore it
# first identify set of words not in glove
not_in_glove = set(np.setdiff1d(tokens, index_as_list))
            # get words in glove, including duplicates
# so if words exist n times in text, they will be n times in list
in_glove = [word for word in tokens if word not in not_in_glove]
if len(in_glove) == 0:
                # no word could be found in GloVe
# raise Exception
eString = (
"Could not find any glove values for given words"
)
raise NoGloveValueError(eString)
else:
                # at least one word was found
# lookup glove vectors
# should return duplicates!
glove_values = self.glove.getGloVeByWordList(
wordList=in_glove
)
converted_vals = np.array(glove_values)
# add vectors to list of this profile's vectors
profile_vectors.append(converted_vals)
# fill coverage statistics as share of tokens (=words)
# that exist in glove in comparison to total tokens
profile_coverage = len(converted_vals) / len(tokens)
# add to global list
coverageStatistics.append(profile_coverage)
wordCounts.append(len(tokens))
# after all vectors for this profile are retrieved
# condense with maximum, minimum, average in 900 dim vector
final_vector = self._condenseGloVeVectors(profile_vectors)
# add 900 dim to output list
outputList.append(final_vector)
# Update Progress Bar
helper.printProgressBar(
num + 1,
numProfiles,
prefix='Progress:',
suffix='Complete',
length=50
)
# save coverage statistics in class attribute to be accessible
self.coverageStatistics = coverageStatistics
self.wordCounts = wordCounts
# create numpy array, as scikit needs this format
return np.array(outputList)
def createGloVeFeaturePipeline(
self,
glovePath='data/glove/glove.db',
dataBaseMode=True,
):
"""
        Create a pipeline that can be passed into multiple training processes.
        This is just a blueprint for calculating the features;
        no features are calculated yet!
No parallelization (n_jobs=1) due to GloVe lookup in database.
Parameters
----------
glovePath : string, default='data/glove/glove.db'
Path to GloVe flat or database file.
dataBaseMode : boolean, default=True
If True path points to SQLite database file.
Returns
-------
featurePipeline : Pipeline
Pipeline containing feature generation.
"""
glove = GloVe(
filePath=glovePath,
dataBaseMode=dataBaseMode,
)
self.glove = glove
# Create skicit-learn compatible FunctionTransformers
# for usage with other sklearn functions
# featureGloVe is the name of the function to be called to
# extract features
glove_Trans = FunctionTransformer(self.featureGloVe, validate=False)
# Combine feature(s) with FeatureUnion
featureTransformer = FeatureUnion([
('glove', glove_Trans),
], n_jobs=1) # no parallelization
# combine into a pipeline, no scaling since GloVe is scaled
featurePipeline = Pipeline([
('features', featureTransformer)
])
return featurePipeline
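# Illustrative usage sketch (hypothetical; assumes `profiles` is a list of
# Profile objects carrying the expected LIWC attributes and text):
#
#   features = Features()
#   liwc_pipeline = features.createLIWCFeaturePipeline()
#   X_liwc = liwc_pipeline.fit_transform(profiles)  # scaled LIWC feature matrix
#
#   glove_pipeline = features.createGloVeFeaturePipeline(
#       glovePath='data/glove/glove.db', dataBaseMode=True)
#   X_glove = glove_pipeline.fit_transform(profiles)  # 900-dim GloVe features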
|
py | 7dfb76a54ad9097f3f3adae24d57398fb76edeac | # -*- coding: utf-8 -*-
# flake8: noqa
"""
MIT License
Copyright (c) 2019-2021 Terbau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = '3.5.0'
from .client import Client, run_multiple, start_multiple, close_multiple
from .auth import (Auth, EmailAndPasswordAuth, ExchangeCodeAuth,
AuthorizationCodeAuth, DeviceAuth, RefreshTokenAuth,
AdvancedAuth)
from .friend import Friend, IncomingPendingFriend, OutgoingPendingFriend
from .message import FriendMessage, PartyMessage
from .party import (DefaultPartyConfig, DefaultPartyMemberConfig, PartyMember,
ClientPartyMember, JustChattingClientPartyMember, Party,
ClientParty, ReceivedPartyInvitation, SentPartyInvitation,
PartyJoinConfirmation)
from .presence import Presence, PresenceGameplayStats, PresenceParty
from .user import (ClientUser, User, BlockedUser, ExternalAuth,
UserSearchEntry, SacSearchEntryUser)
from .stats import StatsV2, StatsCollection
from .enums import *
from .errors import *
from .store import Store, FeaturedStoreItem, DailyStoreItem
from .news import BattleRoyaleNewsPost
from .playlist import Playlist
from .kairos import Avatar
from .http import HTTPRetryConfig, Route
|
py | 7dfb76bee43bf1fd88795197397df974948c7f80 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 04:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('info', '0003_auto_20171119_0721'),
]
operations = [
migrations.RenameModel(
old_name='Photo_three',
new_name='Photos_three',
),
]
|
py | 7dfb7782d8a03bf5ab4900c520b0462452303e5a | from picamera import PiCamera
from time import sleep, time
import sys
import argparse
import cv2
VALID_MODES = ["photo", "video"]
def video(camera, filename="/home/pi/Desktop/video.h264", duration=10):
camera.start_recording(filename)
sleep(duration)
camera.stop_recording()
return filename
def photo(camera, filename="/home/pi/Desktop/photo.jpg"):
camera.capture(filename)
return filename
def acquire(mode="video"):
cam_settings = dict(resolution=(1024, 768), framerate=15)
prev_settings = dict(alpha=200, fullscreen=False, window=(0, 0, 1024, 768))
camera_function = dict(video=video, photo=photo)
file_ext = dict(video="h264", photo="jpg")
filename = f"/home/pi/Desktop/{mode}-{time()}.{file_ext[mode]}"
with PiCamera(**cam_settings) as camera:
camera.rotation = 90
with camera.start_preview(**prev_settings) as preview:
sleep(2)
output = camera_function[mode](camera, filename=filename)
return output
def cli_args():
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
ap.add_argument("-p", "--prototxt", required=True, help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True, help="path to Caffe pre-trained model")
ap.add_argument("-l", "--labels", required=True, help="path to ImageNet labels (i.e., syn-sets)")
args = vars(ap.parse_args())
return args
def main():
mode = "photo"
# args = cli_args()
output = acquire(mode)
print(f"Saved output to: {output}")
if __name__ == "__main__":
main()
|
py | 7dfb7a02d7a4c70e9188ae7ec7e9206e819ee2c5 | # -*- coding: utf-8 -*-
import logging
from typing import List
from aiohttp.web import Request
from mtpylon.middlewares import MiddleWareFunc
from mtpylon.message_sender import MessageSender
from mtpylon.income_message import IncomeMessage
logger = logging.getLogger(__name__)
async def handle_unknown_message(
middlewares: List[MiddleWareFunc],
sender: MessageSender,
request: Request,
message: IncomeMessage,
):
"""
Logs that we don't know how to handle this message
"""
logger.warning(f'Unknown message: {message}')
|
py | 7dfb7aeee0a6215762e518792d484ded225b3a4c | from itertools import chain
import json
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.safestring import SafeString
from django.utils.module_loading import import_string
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.views.generic import View, TemplateView
from django.shortcuts import redirect
from census.views import GeographyDetailView as BaseGeographyDetailView, LocateView as BaseLocateView, render_json_to_response
from wazimap.geo import geo_data
from wazimap.profiles import enhance_api_data
from wazimap.data.tables import get_datatable, DATA_TABLES
from wazimap.data.utils import LocationNotFound
from wazimap.data.download import DownloadManager
def render_json_error(message, status_code=400):
""" Utility method for rendering a view's data to JSON response.
"""
result = json.dumps({'error': message}, indent=4)
response = HttpResponse(result, content_type='application/javascript')
response.status_code = status_code
return response
class HomepageView(TemplateView):
template_name = 'homepage.html'
def get_context_data(self, *args, **kwargs):
return {
'root_geo': geo_data.root_geography(),
}
class GeographyDetailView(BaseGeographyDetailView):
adjust_slugs = True
default_geo_version = None
def dispatch(self, *args, **kwargs):
request = args[0]
version = request.GET.get('geo_version', self.default_geo_version)
self.geo_id = self.kwargs.get('geography_id', None)
try:
self.geo_level, self.geo_code = self.geo_id.split('-', 1)
self.geo = geo_data.get_geography(self.geo_code, self.geo_level, version)
except (ValueError, LocationNotFound):
raise Http404
# check slug
if self.adjust_slugs and (kwargs.get('slug') or self.geo.slug):
if kwargs['slug'] != self.geo.slug:
kwargs['slug'] = self.geo.slug
url = '/profiles/%s-%s-%s' % (self.geo_level, self.geo_code, self.geo.slug)
return redirect(url, permanent=True)
# Skip the parent class's logic completely and go back to basics
return TemplateView.dispatch(self, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
page_context = {}
# load the profile
profile_method = settings.WAZIMAP.get('profile_builder', None)
self.profile_name = settings.WAZIMAP.get('default_profile', 'default')
if not profile_method:
raise ValueError("You must define WAZIMAP.profile_builder in settings.py")
profile_method = import_string(profile_method)
profile_data = profile_method(self.geo, self.profile_name, self.request)
profile_data['geography'] = self.geo.as_dict_deep()
profile_data = enhance_api_data(profile_data)
page_context.update(profile_data)
profile_data_json = SafeString(json.dumps(profile_data, cls=DjangoJSONEncoder))
page_context.update({
'profile_data_json': profile_data_json
})
# is this a head-to-head view?
page_context['head2head'] = 'h2h' in self.request.GET
return page_context
def get_geography(self, geo_id):
# stub this out to prevent the subclass for calling out to CR
pass
def get_template_names(self):
return ['profile/profile_detail_%s.html' % self.profile_name, 'profile/profile_detail.html']
class GeographyJsonView(GeographyDetailView):
""" Return geo profile data as json. """
adjust_slugs = False
default_geo_version = settings.WAZIMAP.get('legacy_embed_geo_version')
def dispatch(self, *args, **kwargs):
return super(GeographyJsonView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return HttpResponse(context['profile_data_json'], content_type='application/javascript')
class PlaceSearchJson(View):
def get(self, request, *args, **kwargs):
geo_levels = request.GET.get('geolevels', None)
geo_version = request.GET.get('geo_version', None)
if geo_levels:
geo_levels = [lev.strip() for lev in geo_levels.split(',')]
geo_levels = [lev for lev in geo_levels if lev]
if 'q' in request.GET:
search_term = request.GET['q']
places = geo_data.get_locations(search_term, geo_levels, geo_version)
return render_json_to_response(
{'results': [p.as_dict() for p in places]}
)
elif 'coords' in request.GET and ',' in request.GET['coords']:
lat, lon = self.request.GET['coords'].split(',', 1)
try:
lat = float(lat)
lon = float(lon)
except ValueError as e:
return HttpResponseBadRequest('bad parameter: %s' % e.message)
places = geo_data.get_locations_from_coords(latitude=lat, longitude=lon, levels=geo_levels, version=geo_version)
return render_json_to_response({'results': [p.as_dict() for p in places]})
else:
return HttpResponseBadRequest('"q" or "coords" parameter is required')
class LocateView(BaseLocateView):
def get_context_data(self, *args, **kwargs):
page_context = {}
lat = self.request.GET.get('lat', None)
lon = self.request.GET.get('lon', None)
if lat and lon:
version = self.request.GET.get('geo_version', None)
places = geo_data.get_locations_from_coords(latitude=lat, longitude=lon, version=version)
page_context.update({
'location': {
'lat': lat,
'lon': lon
},
'places': places
})
return page_context
class DataAPIView(View):
"""
View that provides an API for census table information, mimicking that
of the Censusreporter API described at https://github.com/censusreporter/census-api#get-10datashowacs
An example call:
http://api.censusreporter.org/1.0/data/show/latest?table_ids=B17001&geo_ids=04000US36%2C01000US
"""
def get(self, request, *args, **kwargs):
try:
self.geo_ids = request.GET.get('geo_ids', '').split(',')
geo_version = request.GET.get('geo_version', None)
self.data_geos, self.info_geos = self.get_geos(self.geo_ids, geo_version)
except LocationNotFound as e:
return render_json_error(e.message, 404)
try:
self.table_ids = request.GET.get('table_ids', '').split(',')
self.tables = [get_datatable(t) for t in self.table_ids]
except KeyError as e:
return render_json_error('Unknown table: %s' % e.message, 404)
if kwargs.get('action') == 'show':
return self.show(request)
if kwargs.get('action') == 'download':
return self.download(request)
def show(self, request):
dataset = ', '.join(sorted(list(set(t.dataset_name for t in self.tables))))
years = ', '.join(sorted(list(set(t.year for t in self.tables))))
data = self.get_data(self.data_geos, self.tables)
return render_json_to_response({
'release': {
'name': dataset,
'years': years,
},
'tables': dict((t.id.upper(), t.as_dict()) for t in self.tables),
'data': data,
'geography': dict((g.geoid, g.as_dict()) for g in chain(self.data_geos, self.info_geos)),
})
def download(self, request):
mgr = DownloadManager()
fmt = request.GET.get('format', 'csv')
if fmt not in mgr.DOWNLOAD_FORMATS:
            response = HttpResponse('Unsupported format %s. Supported formats: %s' % (fmt, ', '.join(mgr.DOWNLOAD_FORMATS.keys())))
response.status_code = 400
return response
data = self.get_data(self.data_geos, self.tables)
content, fname, mime_type = mgr.generate_download_bundle(self.tables, self.data_geos, self.geo_ids, data, fmt)
response = HttpResponse(content, content_type=mime_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % fname
return response
def get_geos(self, geo_ids, geo_version):
"""
Return a tuple (data_geos, info_geos) of geo objects,
where data_geos or geos we should get data for, and info_geos
are geos that we only need to return geo info/metadata for.
"""
data_geos = []
info_geos = []
for geo_id in geo_ids:
# either country-KE or level|country-KE, which indicates
# we must break country-KE into +levels+
if '-' not in geo_id:
raise LocationNotFound('Invalid geo id: %s' % geo_id)
level, code = geo_id.split('-', 1)
if '|' in level:
# break geo down further
split_level, level = level.split('|', 1)
geo = geo_data.get_geography(code, level, geo_version)
info_geos.append(geo)
try:
data_geos.extend(geo.split_into(split_level))
except ValueError:
raise LocationNotFound('Invalid geo level: %s' % split_level)
else:
# normal geo
data_geos.append(geo_data.get_geography(code, level, geo_version))
return data_geos, info_geos
def get_data(self, geos, tables):
data = {}
for table in tables:
for geo_id, table_data in table.raw_data_for_geos(geos).iteritems():
data.setdefault(geo_id, {})[table.id.upper()] = table_data
return data
class TableAPIView(View):
"""
View that lists data tables.
"""
def get(self, request, *args, **kwargs):
return render_json_to_response([t.as_dict(columns=False) for t in DATA_TABLES.itervalues()])
class AboutView(TemplateView):
template_name = 'about.html'
class HelpView(TemplateView):
template_name = 'help.html'
class GeographyCompareView(TemplateView):
template_name = 'profile/head2head.html'
def get_context_data(self, geo_id1, geo_id2):
page_context = {
'geo_id1': geo_id1,
'geo_id2': geo_id2,
}
try:
level, code = geo_id1.split('-', 1)
page_context['geo1'] = geo_data.get_geography(code, level)
level, code = geo_id2.split('-', 1)
page_context['geo2'] = geo_data.get_geography(code, level)
except (ValueError, LocationNotFound):
raise Http404
return page_context
class GeoAPIView(View):
"""
View that lists things about geos. Currently just parents.
"""
def get(self, request, geo_id, *args, **kwargs):
try:
level, code = geo_id.split('-', 1)
geo = geo_data.get_geography(code, level)
except (ValueError, LocationNotFound):
raise Http404
parents = [g.as_dict() for g in geo.ancestors()]
return render_json_to_response(parents)
class TableDetailView(TemplateView):
template_name = 'table/table_detail.html'
def dispatch(self, *args, **kwargs):
try:
self.table = get_datatable(kwargs['table'])
except KeyError:
raise Http404
return super(TableDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, *args, **kwargs):
return {
'table': self.table,
}
|
py | 7dfb7b260ab4efd6a789835dbc96edc3dc5aeea5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Special Pythagorean triplet" – Project Euler Problem No. 9
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=9
#
def isPythagoreanTriple(a, b, c):
if a < b < c:
return a**2 + b**2 == c**2
return False
def findTripleHavingSum(sum):
# small problem size -> brute force it
for a in range(1, sum):
for b in range(a, sum - a + 1):
for c in range(b, sum - b + 1):
if a+b+c == sum:
if isPythagoreanTriple(a, b, c):
return (a, b, c)
sum = 1000
triple = findTripleHavingSum(sum)
print "Triple:", triple
print "Solution:", reduce(lambda product, factor: product * factor, triple)
|
py | 7dfb7c58f9847aaed8067d31c7336c49d992ff58 | #!/usr/bin/env python3
#
# Copyright (C) 2020-2021 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""Toggle settings in `ftoption.h` file based on command-line arguments.
This script takes an `ftoption.h` file as input and rewrites
`#define`/`#undef` lines in it based on `--enable=CONFIG_VARNAME` or
`--disable=CONFIG_VARNAME` arguments passed to it, where `CONFIG_VARNAME` is
configuration variable name, such as `FT_CONFIG_OPTION_USE_LZW`, that may
appear in the file.
Note that if one of `CONFIG_VARNAME` is not found in the input file, this
script exits with an error message listing the missing variable names.
"""
import argparse
import os
import re
import sys
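# Example invocation (illustrative only; the script name and paths are
# hypothetical and depend on your FreeType checkout):
#
#   python toggle_ftoption.py include/freetype/config/ftoption.h \
#       --enable FT_CONFIG_OPTION_USE_LZW \
#       --output /tmp/ftoption.h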
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"input", metavar="FTOPTION_H", help="Path to input ftoption.h file."
)
parser.add_argument("--output", help="Output to file instead of stdout.")
parser.add_argument(
"--enable",
action="append",
default=[],
help="Enable a given build option (e.g. FT_CONFIG_OPTION_USE_LZW).",
)
parser.add_argument(
"--disable",
action="append",
default=[],
help="Disable a given build option.",
)
args = parser.parse_args()
common_options = set(args.enable) & set(args.disable)
if common_options:
parser.error(
"Options cannot be both enabled and disabled: %s"
% sorted(common_options)
)
return 1
with open(args.input) as f:
input_file = f.read()
options_seen = set()
new_lines = []
for line in input_file.splitlines():
# Expected formats:
# #define <CONFIG_VAR>
# /* #define <CONFIG_VAR> */
# #undef <CONFIG_VAR>
line = line.rstrip()
if line.startswith("/* #define ") and line.endswith(" */"):
option_name = line[11:-3].strip()
option_enabled = False
elif line.startswith("#define "):
option_name = line[8:].strip()
option_enabled = True
elif line.startswith("#undef "):
option_name = line[7:].strip()
option_enabled = False
else:
new_lines.append(line)
continue
options_seen.add(option_name)
if option_enabled and option_name in args.disable:
line = "#undef " + option_name
elif not option_enabled and option_name in args.enable:
line = "#define " + option_name
new_lines.append(line)
result = "\n".join(new_lines) + "\n"
# Sanity check that all command-line options were actually processed.
cmdline_options = set(args.enable) | set(args.disable)
assert cmdline_options.issubset(
options_seen
), "Could not find options in input file: " + ", ".join(
sorted(cmdline_options - options_seen)
)
if args.output:
with open(args.output, "w") as f:
f.write(result)
else:
print(result)
return 0
if __name__ == "__main__":
sys.exit(main())
|
py | 7dfb7d0f84b348eedf92941e4eb8b624f5545cdf | from __future__ import print_function
import sys
sys.path.append(".")
from collections import Counter
from itertools import chain
import torch
from utils.math_utils import js_divergence, kl_divergence
from utils.tensor_ops import get_tensor
from metrics.base_metric import BaseEvaluator
# sys.path.append(".")
class CorpusDistribution(object):
@staticmethod
def get_unigram_distribution(examples, vocab):
"""
:param examples: list of sentence
:param vocab:
:return:
"""
unigram_count = [0] * len(vocab)
word_freq = Counter(chain(*examples))
for word in word_freq:
            unigram_count[vocab[word]] = word_freq[word]
count = get_tensor(unigram_count)
count += (1.0 / torch.sum(count)) * (count.eq(0.0).float())
# count += 1e-6
return count / torch.sum(count)
class UnigramKLEvaluator(BaseEvaluator):
def _check_format(self, **kwargs):
pass
def __init__(self, ):
super(UnigramKLEvaluator, self).__init__(name="Unigram KL")
def get_evaluate(self, corpus_source, pred_source, vocab, dtype='js'):
"""
:param corpus_source: list of sentence
:param pred_source: list of sentence
:param vocab: VocabularyEntry
:param dtype: "js" or "kl"
:return:
"""
ref_dis = CorpusDistribution.get_unigram_distribution(examples=corpus_source, vocab=vocab)
pre_dis = CorpusDistribution.get_unigram_distribution(examples=pred_source, vocab=vocab)
func = js_divergence if dtype == 'js' else kl_divergence
return func(ref_dis, pre_dis)
if __name__ == "__main__":
train_path = "/home/user_data/baoy/projects/seq2seq_parser/data/snli-sample/train.bin"
dev_path = "/home/user_data/baoy/projects/seq2seq_parser/data/snli-sample/dev.bin"
test_path = "/home/user_data/baoy/projects/seq2seq_parser/data/snli-sample/test.bin"
vocab_file = "/home/user_data/baoy/projects/seq2seq_parser/data/snli-sample/origin_vocab.bin"
plain_file = "./gen.text"
with open(plain_file, 'r') as f:
sample = [line.split(" ") for line in f.readlines()]
from struct_self.dataset import Dataset
from struct_self.vocab import Vocab
vocab = Vocab.from_bin_file(vocab_file)
train_exam = Dataset.from_bin_file(train_path).examples
train = [e.src for e in train_exam]
dev_exam = Dataset.from_bin_file(dev_path).examples
dev = [e.src for e in dev_exam]
test_exam = Dataset.from_bin_file(test_path).examples
test = [e.src for e in test_exam]
t = UnigramKLEvaluator()
# print("train with dev:", t.get_evaluate(train, dev, vocab.src))
# print("train with test:", t.get_evaluate(train, test, vocab.src))
# print("dev with test", t.get_evaluate(dev, test, vocab.src))
# print("test with dev", t.get_evaluate(test, dev, vocab.src))
print("train with sample", t.get_evaluate(test, sample, vocab.src))
|
py | 7dfb8188c7f41bc49bfd02846f14dcc93b945599 | import hashlib
import json
import logging
from dataclasses import asdict, dataclass
import requests
from .config import Przelewy24Config
logger = logging.getLogger(__name__)
@dataclass
class Transaction:
amount: int
sessionId: str
currency: str
description: str
email: str
country: str
language: str
@dataclass
class TransactionDTO:
merchantId: int
posId: int
sessionId: str
amount: int
currency: str
description: str
email: str
country: str
language: str
urlReturn: str
urlStatus: str
sign: str
# cart: List[ItemDTO]
@classmethod
def create_from(
cls,
transaction: Transaction,
config: Przelewy24Config,
sign: str,
success_url: str,
status_url: str,
):
return cls(
merchantId=config.merchant_id,
posId=config.merchant_id,
sessionId=transaction.sessionId,
amount=transaction.amount,
currency=transaction.currency,
description=transaction.description,
email=transaction.email,
country=transaction.country,
language=transaction.language,
urlReturn=success_url,
urlStatus=status_url,
sign=sign,
)
@dataclass
class VerifyDTO:
merchantId: int
posId: int
sessionId: str
amount: int
currency: str
orderId: int
sign: str
@classmethod
def create_from(
cls,
*,
orderId: int,
transaction: Transaction,
config: Przelewy24Config,
sign: str,
):
return cls(
merchantId=config.merchant_id,
posId=config.merchant_id,
sessionId=transaction.sessionId,
amount=transaction.amount,
currency=transaction.currency,
orderId=orderId, # TODO
sign=sign,
)
class Przelewy24API:
def __init__(self, config: Przelewy24Config, session=None):
self._http = session or requests.session()
self._config = config
super().__init__()
def _do(self, method: str, endpoint: str, data=None):
response = self._http.request(
method=method,
url=endpoint,
json=data,
auth=(str(self._config.pos_id), str(self._config.api_key)),
)
logger.debug(
"%s %s: status_code=%s content=%s",
method,
endpoint,
response.status_code,
response.content.decode("utf-8"),
)
if response.status_code != 200:
raise RuntimeError(
f"Przelewy24 returns {response.status_code} instead of 200: {response.content}"
)
return response.json()
def _create_sha386_sign(self, **kwargs) -> str:
return hashlib.sha384(
json.dumps(kwargs).replace(" ", "").encode("utf-8")
).hexdigest()
def testConnection(self) -> bool:
response = self._do("GET", self._config.endpoints.testConnection)
return response["data"]
def register(
self, *, transaction: Transaction, success_url: str, status_url: str
) -> str:
sign = self._config.generate_sign(
sessionId=transaction.sessionId,
merchantId=self._config.merchant_id,
amount=transaction.amount,
currency=transaction.currency,
)
transaction = TransactionDTO.create_from(
transaction, self._config, sign, success_url, status_url
)
payload = asdict(transaction)
response = self._do("POST", self._config.endpoints.transactionRegister, payload)
token = response["data"]["token"]
return f"{self._config.endpoints.transactionRequest}/{token}"
def verify(self, *, transaction: Transaction, orderId: int) -> bool:
sign = self._config.generate_sign(
sessionId=transaction.sessionId,
orderId=orderId,
amount=transaction.amount,
currency=transaction.currency,
)
verify = VerifyDTO.create_from(
orderId=orderId, transaction=transaction, config=self._config, sign=sign
)
payload = asdict(verify)
response = self._do("PUT", self._config.endpoints.transactionVerify, payload)
return response["data"]["status"] == "success"
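# Illustrative usage sketch (hypothetical values; a real Przelewy24Config with
# merchant credentials and endpoint URLs is assumed):
#
#   api = Przelewy24API(config)
#   txn = Transaction(amount=1000, sessionId="order-123", currency="PLN",
#                     description="Test order", email="[email protected]",
#                     country="PL", language="pl")
#   payment_url = api.register(transaction=txn,
#                              success_url="https://example.com/return",
#                              status_url="https://example.com/p24/status")
#   # later, from the status webhook handler:
#   paid = api.verify(transaction=txn, orderId=received_order_id)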
|
py | 7dfb81931cd8d8fb6d47755ce0abb96a25a6b34e | from .default import Config
class ProductionConfig(Config):
"""
Configurations for Production.
"""
DEBUG = False
TESTING = False
|
py | 7dfb81b5c91c7167ab02477bbe9a891577536fc8 | #!/usr/bin/env python
import os
import sys
from setuptools import setup
try:
from setuptools import find_namespace_packages
except ImportError:
# the user has a downlevel version of setuptools.
print('Error: dbt requires setuptools v40.1.0 or higher.')
print('Please upgrade setuptools with "pip install --upgrade setuptools" '
'and try again')
sys.exit(1)
PSYCOPG2_MESSAGE = '''
No package name override was set.
Using 'psycopg2-binary' package to satisfy 'psycopg2'
If you experience segmentation faults, silent crashes, or installation errors,
consider retrying with the 'DBT_PSYCOPG2_NAME' environment variable set to
'psycopg2'. It may require a compiler toolchain and development libraries!
'''.strip()
def _dbt_psycopg2_name():
# if the user chose something, use that
package_name = os.getenv('DBT_PSYCOPG2_NAME', '')
if package_name:
return package_name
# default to psycopg2-binary for all OSes/versions
print(PSYCOPG2_MESSAGE)
return 'psycopg2-binary'
package_name = "dbt-postgres"
package_version = "0.17.0rc1"
description = """The postgres adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md')) as f:
long_description = f.read()
DBT_PSYCOPG2_NAME = _dbt_psycopg2_name()
setup(
name=package_name,
version=package_version,
description=description,
long_description=description,
long_description_content_type='text/markdown',
author="Fishtown Analytics",
author_email="[email protected]",
url="https://github.com/fishtown-analytics/dbt",
packages=find_namespace_packages(include=['dbt', 'dbt.*']),
package_data={
'dbt': [
'include/postgres/dbt_project.yml',
'include/postgres/macros/*.sql',
'include/postgres/macros/**/*.sql',
]
},
install_requires=[
'dbt-core=={}'.format(package_version),
'{}~=2.8'.format(DBT_PSYCOPG2_NAME),
],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
python_requires=">=3.6.2",
)
|
py | 7dfb81e71d7f48b3703c2557c6e8b8b715ec3c22 | from django.contrib import admin
from profiles.models import Profile, Subscription, Notification
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ("user", "email", "feature_subscriptions")
list_editable = ("feature_subscriptions",)
search_fields = ("user__email",)
def email(self, p):
return p.user.email
@admin.register(Subscription)
class SubscriptionAdmin(admin.ModelAdmin):
list_display = ("__str__", "user", "subscription_type", "active")
list_filter = ("active",)
autocomplete_fields = ("sponsor", "bill")
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
list_display = ("id", "email", "sent", "num_bill_updates", "num_query_updates")
search_fields = ("email",)
ordering = ("sent",)
date_hierarchy = "sent"
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
|
py | 7dfb820cdee2eb6e961388cea5f176f91f575dbf | import pandas as pd
from tqdm import tqdm
from ..util import log
from sklearn.preprocessing import OrdinalEncoder
def auto_encoder(df, df_feature_type, id):
df_copy = df.copy()
label_encoder_list = []
ordinal_encoder_list = []
for f in tqdm(df_feature_type.keys()):
if df_feature_type[f] == 'cat':
label_encoder_list.append(f)
temp = pd.DataFrame(df_copy[f].astype(str))
temp.index = range(len(temp))
temp[f] = temp[[f]].apply(lambda x: x.astype('category').cat.codes)
if id is not None:
if f in id:
df_copy[f + '_encoder'] = temp[f].values
else:
df_copy[f] = temp[f].values
if df_feature_type[f] == 'ord':
ordinal_encoder_list.append(f)
ord_encoder = OrdinalEncoder()
df_copy[f] = ord_encoder.fit_transform(pd.DataFrame(df_copy[f]))
log(f"label_encoder_list: {label_encoder_list}")
log(f"ordinal_encoder_list: {ordinal_encoder_list}")
return df_copy |
py | 7dfb826d8f3bfff1722c79bac6d24da5138ae0d5 | # -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Genomics resource filter expression rewrite backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.core.resource import resource_expr_rewrite
from googlecloudsdk.core.util import times
import six
def _RewriteTimeTerm(key, op, operand):
"""Rewrites <createTime op operand>."""
if op not in ['<', '<=', '=', ':', '>=', '>']:
return None
try:
dt = times.ParseDateTime(operand)
except ValueError as e:
raise ValueError(
'{operand}: date-time value expected for {key}: {error}'
.format(operand=operand, key=key, error=str(e)))
if op == ':':
op = '='
return '{key} {op} "{dt}"'.format(
key=key, op=op, dt=times.FormatDateTime(dt, tzinfo=times.UTC))
class OperationsBackend(resource_expr_rewrite.Backend):
"""Limit filter expressions to those supported by the Genomics backend."""
_FORMAT = '{key} {op} {operand}'
_QUOTED_FORMAT = '{key} {op} "{operand}"'
_TERMS = {
r'^done$': _FORMAT,
r'^error.code$': _FORMAT,
r'^metadata.labels\.(.*)': _QUOTED_FORMAT,
r'^metadata.events$': _QUOTED_FORMAT,
}
_CREATE_TIME_TERMS = [
r'^metadata.create_time$',
r'^metadata.createTime$',
]
def RewriteTerm(self, key, op, operand, key_type):
"""Limit <key op operand> terms to expressions supported by the backend."""
for regex in self._CREATE_TIME_TERMS:
if re.match(regex, key):
return _RewriteTimeTerm(key, op, operand)
for regex, fmt in six.iteritems(self._TERMS):
if re.match(regex, key):
return fmt.format(key=key, op=op, operand=operand)
return None
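# --- Added illustrative usage (not part of the original module) ---
# Sketch of how RewriteTerm maps supported filter terms; the key/operand values
# are made up, and constructing the backend with no arguments is assumed here.
if __name__ == '__main__':
  _backend = OperationsBackend()
  # Label terms are quoted verbatim -> 'metadata.labels.env = "prod"'
  print(_backend.RewriteTerm('metadata.labels.env', '=', 'prod', None))
  # createTime terms are normalized to a UTC timestamp via _RewriteTimeTerm.
  print(_backend.RewriteTerm('metadata.createTime', '>', '2018-01-01', None))
  # Unsupported keys are not rewritten -> None
  print(_backend.RewriteTerm('some.other.key', '=', 'x', None))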
|
py | 7dfb8272c2d964f14c2f9407c337a8fede9bf073 | """
Bloom filter Python example
===========================
This is a toy implementation of a Bloom filter for educational purposes.
"""
import functools
import hashlib
import math
class BloomFilter:
"""Bloom filter implementation."""
def __init__(self, m, k):
self._m = m
self._k = k
self._bits = [False] * m
self._hash_fns = [functools.partial(self._Hash, i) for i in range(k)]
def _Hash(self, seed, x):
"""This method with different seed values make up the k hash functions."""
h = hashlib.md5()
h.update(b'%d' % seed)
h.update(b'%d' % x)
return int.from_bytes(h.digest(), signed=False, byteorder='big') % self._m
def Add(self, x):
"""Add an element to the set."""
for f in self._hash_fns:
self._bits[f(x)] = True
def Has(self, x):
"""Query the set for an element, may return false positives."""
for f in self._hash_fns:
if not self._bits[f(x)]:
return False
return True
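# --- Added sketch: standard Bloom filter sizing formulas (illustrative helper) ---
# For n expected elements and a target false-positive rate p, the classical
# estimates are m = -n*ln(p) / (ln 2)^2 bits and k = (m/n) * ln 2 hash functions.
# This helper is not used by the class above.
def SuggestParameters(n, p):
  """Return a suggested (m, k) for n elements and false-positive rate p."""
  m = int(math.ceil(-n * math.log(p) / (math.log(2) ** 2)))
  k = max(1, int(round(m / n * math.log(2))))
  return m, k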
if __name__ == '__main__':
bf = BloomFilter(m=200, k=3)
for x in range(20):
bf.Add(x)
for x in range(21, 100):
if bf.Has(x):
print('False positive:', x) |
py | 7dfb82a5648a61dface400307da2446c50cc41bb | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.format_token."""
import unittest
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import format_token
class TabbedContinuationAlignPaddingTest(unittest.TestCase):
def testSpace(self):
align_style = 'SPACE'
pad = format_token._TabbedContinuationAlignPadding(0, align_style, 2, 4)
self.assertEqual(pad, '')
pad = format_token._TabbedContinuationAlignPadding(2, align_style, 2, 4)
self.assertEqual(pad, ' ' * 2)
pad = format_token._TabbedContinuationAlignPadding(5, align_style, 2, 4)
self.assertEqual(pad, ' ' * 5)
def testFixed(self):
align_style = 'FIXED'
pad = format_token._TabbedContinuationAlignPadding(0, align_style, 4, 8)
self.assertEqual(pad, '')
pad = format_token._TabbedContinuationAlignPadding(2, align_style, 4, 8)
self.assertEqual(pad, '\t' * 2)
pad = format_token._TabbedContinuationAlignPadding(5, align_style, 4, 8)
self.assertEqual(pad, '\t' * 2)
def testVAlignRight(self):
align_style = 'VALIGN-RIGHT'
pad = format_token._TabbedContinuationAlignPadding(0, align_style, 4, 8)
self.assertEqual(pad, '')
pad = format_token._TabbedContinuationAlignPadding(2, align_style, 4, 8)
self.assertEqual(pad, '\t')
pad = format_token._TabbedContinuationAlignPadding(4, align_style, 4, 8)
self.assertEqual(pad, '\t')
pad = format_token._TabbedContinuationAlignPadding(5, align_style, 4, 8)
self.assertEqual(pad, '\t' * 2)
class FormatTokenTest(unittest.TestCase):
def testSimple(self):
tok = format_token.FormatToken(pytree.Leaf(token.STRING, "'hello world'"))
self.assertEqual("FormatToken(name=STRING, value='hello world')", str(tok))
self.assertTrue(tok.is_string)
tok = format_token.FormatToken(pytree.Leaf(token.COMMENT, '# A comment'))
self.assertEqual('FormatToken(name=COMMENT, value=# A comment)', str(tok))
self.assertTrue(tok.is_comment)
def testIsMultilineString(self):
tok = format_token.FormatToken(pytree.Leaf(token.STRING, '"""hello"""'))
self.assertTrue(tok.is_multiline_string)
tok = format_token.FormatToken(pytree.Leaf(token.STRING, 'r"""hello"""'))
self.assertTrue(tok.is_multiline_string)
if __name__ == '__main__':
unittest.main()
|
py | 7dfb82d6bbdd6639c774c01a9f4cf027b9729929 | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.service_port import ServicePort
globals()['ServicePort'] = ServicePort
class IoArgoprojEventsV1alpha1Service(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'cluster_ip': (str,), # noqa: E501
'ports': ([ServicePort],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'cluster_ip': 'clusterIP', # noqa: E501
'ports': 'ports', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""IoArgoprojEventsV1alpha1Service - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
cluster_ip (str): [optional] # noqa: E501
ports ([ServicePort]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IoArgoprojEventsV1alpha1Service - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
cluster_ip (str): [optional] # noqa: E501
ports ([ServicePort]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 7dfb835e41a8a6b5077e9e23781d707b7dd11cee | import random
import numpy as np
from itertools import product
import multiprocessing as mp
import os
import cv2
import pybullet as pb
from pybullet_utils import bullet_client
import pybullet_data
from time import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir_out', default='test/', type=str, help="Where experiments should be saved")
parser.add_argument('--seed', default=1, type=int, help="Random seed")
parser.add_argument('--n_balls', default=3, type=int, help="# of balls in the scene")
parser.add_argument('--n_examples', default=10, type=int, help="# of experiments to generate")
args = parser.parse_args()
COLORS = ['red', 'green', 'blue', 'yellow'][:args.n_balls]
W, H = 112, 112 # Image shape
RANGE_POS = 3
RANGE_SPEED = 3
EPSILON = 100 # Threshold for constraints
def check_bayes(alt_ab, alt_cd, ab, cd):
""" Check the identifiability contraint
:param alt_ab: list of alternative trajectories from AB
:param alt_cd: list of alternative trajectories from CD
:param ab: AB candidate
:param cd: CD candidate
:return: True if experiment is identifiable
"""
for i in range(len(alt_ab)):
if alt_ab[i] == ab:
if alt_cd[i] != cd:
return False
return True
def check_counterfactual(alt_cd, cd, mass_permutation):
""" Check the counterfactuality constraint
:param alt_cd: list of alternative trajectories from CD
:param cd: CD candidate
:param mass_permutation: List of every mass permutation
:return: List of counterfactual objects. Experiment is cf if len()>0
"""
counterfactual_cubes = []
for k in range(cd.n_balls):
alter_cf = cd.confounders.copy()
alter_cf[k] = 1 if alter_cf[k] == 10 else 10
alt_trajectory = alt_cd[mass_permutation.index(alter_cf)]
if alt_trajectory != cd:
counterfactual_cubes.append(k)
return counterfactual_cubes
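# --- Added sketch: the identifiability check on toy values ---
# check_bayes only relies on `==` between trajectories, so plain values can
# stand in for Arena objects; the lists below are made-up examples.
# Every alternative equal to AB must map onto an alternative equal to CD:
#   check_bayes(alt_ab=[1, 2, 2], alt_cd=['a', 'b', 'b'], ab=2, cd='b') -> True
#   check_bayes(alt_ab=[1, 2, 2], alt_cd=['a', 'b', 'c'], ab=2, cd='b') -> False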
class Generator:
def __init__(self, dir_out, seed, n_balls, nb_examples):
"""
Class that oversees the experiment generation
:param dir_out: Where experiments should be saved
:param seed: Random seed
:param n_balls: # of balls in the scene
:param nb_examples: # of experiments to generate
"""
self.dir_out = dir_out
self.seed = seed
random.seed(seed)
np.random.seed(seed)
self.mass_permutation = [list(combo) for combo in product([1, 10], repeat=args.n_balls)]
        self.logs_cf = {str(d): 0 for d in self.mass_permutation}  # Useful to ensure balance in the dataset
self.n_balls = n_balls
# LOGS variables
self.list_time = []
self.nb_examples = nb_examples
self.total_trial_counter = 0
self.ab_trial_counter = 0
self.cd_trial_counter = 0
def generate(self):
"""Generate the experiments"""
nb_ex = 0
# Choose colors, masses configuration and if we apply a remove do-operation
do_remove_op, index_cf, colors = self.get_configuration_example()
t = time()
while nb_ex < self.nb_examples:
# Step 1 : find a valid AB
self.total_trial_counter += 1
ab = self.find_valid_AB(self.mass_permutation[index_cf])
ab = self.simulate_one(ab, colors)
# Step 2 : find a valid CD
do_op, cf_cubes, cd = self.find_valid_CD(ab, do_remove_op, colors, index_cf)
if cd is not None: # If a valid CD has been found
self.list_time.append(time() - t)
self.logs_cf[str(self.mass_permutation[index_cf])] += 1 # Update the logs for dataset balance
nb_ex += 1
ab, cd = self.simulate_final(ab, cd, colors) # Simulate AB and CD with rendering
self.save(ab, cd, colors, do_op, cf_cubes, nb_ex) # Save the experiment
t = time()
# Choose new configuration
do_remove_op, index_cf, colors = self.get_configuration_example()
def get_configuration_example(self):
"""Sample a do-operation, colors and masses. Try to ensure balance in the masses distribution"""
        do_remove_op = random.random() < 0.3  # 30% chance of being a remove operation
# Search for the masses with the less representation in previous experiments
cf = min(self.mass_permutation, key=lambda x: self.logs_cf[str(x)])
index_cf = self.mass_permutation.index(cf)
# Randomly sample colors
colors = random.sample(COLORS, args.n_balls)
return do_remove_op, index_cf, colors
def find_valid_AB(self, masse):
"""No constraint on A, simply return a random candidate"""
self.ab_trial_counter += 1
candidate = Arena(self.n_balls, masse)
return candidate
def find_valid_CD(self, ab, do_remove_op, colors, index_cf, ):
"""
Search for a valid CD trajectory.
:param ab: AB candidate
:param do_remove_op: Bool, True if the do-op should be a remove op
:param colors: Colors list
:param index_cf: index of the masse configuration in self.mass_configuration
:return: the do-operation parameters, list of counterfactual objects, CD candidate
"""
found_cd = False
n_trials = 0
while found_cd is False and n_trials < 10: # Try 10 different do-op, else quit
self.cd_trial_counter += 1
if do_remove_op:
n_trials = 10
cd = ab.remove_ball() # Remove a (random) ball
do_op = {"operation": "remove", "amplitude": 0, "cube": -1}
else:
do_op, cd = ab.generate_random_do_operation() # Generate a random do-op
if cd != []: # do-op sampling may failed, in this case, try again
if do_op['operation'] is not None:
# Simulate all alternative traj. from CD
alt_cd = self.simulate_all(cd, colors)
cd.trajectory = alt_cd[index_cf].trajectory.copy()
# Check counterfactuality constraint
counterfactual_cubes = check_counterfactual(alt_cd, cd, self.mass_permutation)
if len(counterfactual_cubes) > 0:
# Simulate all alternative traj. from AB
alt_ab = self.simulate_all(ab, colors)
if check_bayes(alt_ab, alt_cd, ab, cd): # Check identifiability constraint
found_cd = True
n_trials += 1
if found_cd:
return do_op, counterfactual_cubes, cd
else:
return None, None, None
def simulate_all(self, tower, colors):
"""
Simulate every outcomes with every mass configuration for a given initial condition
:param tower: initial condition
:param colors: list of object colors
:return: list of outcomes for each mass configuration
"""
towers = [tower.clone(m) for m in self.mass_permutation]
childPipes, parentPipes = [], []
processes = []
# Simulation are multiprocess, to go faster
for pr in range(len(towers)): # Create pipes to get the simulation
parentPipe, childPipe = mp.Pipe()
parentPipes.append(parentPipe)
childPipes.append(childPipe)
for rank in range(len(towers)): # Run the processes
simulator = Simulator(25, 6, 0, W, H) # Simulate at 25 FPS, for 6 second, no substeps.
p = mp.Process(target=simulator.run, args=(childPipes[rank], towers[rank], 0, colors, False))
p.start()
processes.append(p)
for rank, p in enumerate(processes): # Get the simulation
state, _, _, _ = parentPipes[rank].recv()
towers[rank].trajectory = state
p.join()
return towers
def simulate_one(self, arena, colors):
"""
Simulate a single trajectory without rendering
:param arena: initial condition
:param colors: list of colors
:return: outcome
"""
parentPipe, childPipe = mp.Pipe()
simulator = Simulator(25, 6, 0, W, H)
p = mp.Process(target=simulator.run, args=(childPipe, arena, 0, colors, False))
p.start()
state, _, _, _ = parentPipe.recv()
arena.trajectory = state
p.join()
return arena
def simulate_final(self, ab, cd, colors):
"""
Simulate with rendering
:param ab: AB candidate
:param cd: CD candidate
:param colors: colors list
:return: simulated trajectories
"""
childPipes, parentPipes = [], []
for pr in range(2): # Create pipes to get the simulation
parentPipe, childPipe = mp.Pipe()
parentPipes.append(parentPipe)
childPipes.append(childPipe)
simulator = Simulator(25, 6, 0, W, H)
plane_id = random.randint(0, 3)
p_ab = mp.Process(target=simulator.run, args=(childPipes[0], ab, plane_id, colors, True))
p_cd = mp.Process(target=simulator.run, args=(childPipes[1], cd, plane_id, colors, True))
p_ab.start()
p_cd.start()
# Get results for AB and CD
ab.trajectory, ab.rgb, ab.depth, ab.seg = parentPipes[0].recv()
cd.trajectory, cd.rgb, cd.depth, cd.seg = parentPipes[1].recv()
p_ab.join()
p_cd.join()
return ab, cd
def save(self, ab, cd, colors, do_op, cf_cubes, n):
"""
Save the experiment
:param ab: AB candidate
:param cd: CD candidate
:param colors: colors list
:param do_op: do-operation parameters
:param cf_cubes: list of counterfactual cubes
:param n: index of this experiments
:return:
"""
assert ab.confounders == cd.confounders
assert len(cf_cubes) > 0
# Create the paths
out_dir = self.dir_out + str(self.seed) + "_" + str(n) + "/"
os.makedirs(out_dir, exist_ok=True)
os.makedirs(out_dir + 'ab', exist_ok=True)
os.makedirs(out_dir + 'cd', exist_ok=True)
# Add a column of zero if the do-operation is a remove operation
cd.fill_trajectory(ab.n_balls)
# Save ground truth trajectory as numpy array + save confounders
np.save(out_dir + "ab/states.npy", ab.trajectory)
np.save(out_dir + "cd/states.npy", cd.trajectory)
np.save(out_dir + "confounders.npy", ab.confounders)
# Write do-op parameter in a file
with open(out_dir + "do_op.txt", 'w') as f:
if do_op["operation"] == "remove":
f.write(f"Remove the {colors[-1]} cube")
else:
f.write(
f"Move the {colors[do_op['cube']]} cube of {do_op['amplitude']} in the {do_op['operation']} direction")
# Write colors in a file
with open(out_dir + "COLORS.txt", 'w') as f:
f.write(str(colors))
# Write list of cf cubes in a file
with open(out_dir + "cd/counterfactual_balls.txt", 'w') as f:
f.write("Cubes that strongly depend on their masses\n")
f.write('\n'.join([f"idx:{i}, colors={colors[i]}" for i in cf_cubes]))
# SAVE RGB
writer = cv2.VideoWriter(out_dir + 'ab/rgb.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (W, H))
for rgb in ab.rgb:
writer.write(cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
writer.release()
writer = cv2.VideoWriter(out_dir + 'cd/rgb.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (W, H))
for rgb in cd.rgb:
writer.write(cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
writer.release()
# SAVE DEPTH
writer = cv2.VideoWriter(out_dir + 'ab/depth.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (W, H))
for rgb in ab.depth:
rgb = np.round(rgb * 255)
writer.write(cv2.cvtColor(rgb.astype(np.uint8).reshape((W, H, 1)), cv2.COLOR_GRAY2BGR))
writer.release()
writer = cv2.VideoWriter(out_dir + 'cd/depth.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (W, H))
for rgb in cd.depth:
rgb = np.round(rgb * 255)
writer.write(cv2.cvtColor(rgb.astype(np.uint8).reshape((W, H, 1)), cv2.COLOR_GRAY2BGR))
writer.release()
# SAVE SEGMENTATION
writer = cv2.VideoWriter(out_dir + 'ab/segmentation.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (W, H))
for rgb in ab.seg:
writer.write(cv2.cvtColor(rgb.astype(np.uint8).reshape((W, H, 1)), cv2.COLOR_GRAY2BGR))
writer.release()
writer = cv2.VideoWriter(out_dir + 'cd/segmentation.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (W, H))
for rgb in cd.seg:
writer.write(cv2.cvtColor(rgb.astype(np.uint8).reshape((W, H, 1)), cv2.COLOR_GRAY2BGR))
writer.release()
# Write some logs
with open("logs_create_dataset_" + str(self.seed) + ".txt", "a") as f:
f.write(
f"{n}/{self.nb_examples} in {self.total_trial_counter} trial ({self.ab_trial_counter} on AB, {self.cd_trial_counter} on CD), took {round(self.list_time[-1], 1)} seconds (Average {round(np.mean(self.list_time), 2)})\n")
self.total_trial_counter = 0
self.ab_trial_counter = 0
self.cd_trial_counter = 0
class Arena:
def __init__(self, n_balls, confounders):
"""Class that model a trajectory"""
self.start_position = []
self.start_speed = []
self.n_balls = n_balls
self.confounders = confounders
self.trajectory = None
self.rgb = None
self.depth = None
self.seg = None
self.init()
def init(self):
"""Generate random initial condition"""
for _ in range(self.n_balls):
no_overlap = False # Make sure that there is no overlap between balls
while no_overlap is False:
no_overlap = True
cand_pose = RANGE_POS * (2 * np.random.random(2) - 1) # Generate random position for a ball
for balls in self.start_position:
if np.sqrt(((balls - cand_pose) ** 2).sum()) < 1:
no_overlap = False
self.start_position.append(cand_pose)
self.start_speed.append(RANGE_SPEED * (2 * np.random.random(2) - 1)) # Random speed
def compute_valid_movement_range(self, ball_idx):
"""For do-op sampling : return maximal displacement in each direction
without getting out of the limits"""
x, y = self.start_position[ball_idx]
delta_x = np.array([-3.5 + x, 3.5 - x])
delta_y = np.array([-3.5 + y, 3.5 - y])
return delta_x, delta_y
def remove_ball(self):
"""Remove a random ball from the scene"""
new_arena = Arena(self.n_balls - 1, self.confounders.copy())
new_arena.start_position = [n.copy() for n in self.start_position[:-1]]
new_arena.start_speed = [n.copy() for n in self.start_speed[:-1]]
return new_arena
def clone(self, cf=None):
"""Clone an arena"""
if cf is None:
new_arena = Arena(self.n_balls, self.confounders.copy())
else:
new_arena = Arena(self.n_balls, cf)
new_arena.start_position = [n.copy() for n in self.start_position]
new_arena.start_speed = [n.copy() for n in self.start_speed]
return new_arena
def generate_random_do_operation(self):
"""Return a do-operation candidate"""
ball_idx = random.randint(0, self.n_balls - 1) # Select random ball for do-op
delta_x, delta_y = self.compute_valid_movement_range(ball_idx)
operation = random.choice(['x', 'y']) # Select a direction
delta = delta_x if operation == "x" else delta_y
delta = delta * 0.9
# List of possible do-op (bounded in absolute value)
epsilons = list(np.arange(0.5, delta[1], 0.1)) + list(np.arange(delta[0], -0.5, 0.1))
if len(epsilons) == 0: # If there is no possible movement in this direction, quit...
return {"operation": None, "cube": None, "amplitude": None}, []
# Sample a displacement amplitude
amplitude = random.choices(epsilons, k=1)[0]
cd = self.clone()
# Apply do-op
cd.start_position[ball_idx][0 if operation == "x" else 1] += amplitude
        # Check for overlap (identity check skips comparing a ball with itself)
        for i in cd.start_position:
            for j in cd.start_position:
                if i is not j and np.sqrt(((i - j) ** 2).sum()) < 1:
return {"operation": None, "cube": None, "amplitude": None}, []
return {"operation": operation, "amplitude": amplitude, "cube": ball_idx}, cd
def fill_trajectory(self, n_balls):
"""Make sure that states are of good shape"""
T, K, S = self.trajectory.shape
if K != n_balls:
self.trajectory = np.concatenate([self.trajectory, np.zeros((T, 1, S))], axis=1)
def __eq__(self, other):
"""Check if two trajectories are equal or not"""
if other == []:
return False
error = np.zeros(self.n_balls)
        # Accumulate the 2D (x, y) position error per object (independent of the number of objects)
for k in range(other.trajectory.shape[1]):
error[k] = np.sqrt(((self.trajectory[:, k, :2] - other.trajectory[:, k, :2]) ** 2).sum(-1)).sum(0)
# If 1 object MSE is above threshold, trajectories are different.
return (error > EPSILON).sum() == 0
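# --- Added sketch: the per-ball error used by Arena.__eq__ (toy numbers) ---
# Trajectories have shape (T, K, S) with x, y in the first two state columns.
#   t1 = np.zeros((3, 2, 13)); t2 = np.zeros((3, 2, 13)); t2[:, 1, 0] = 50.0
#   np.sqrt(((t1[:, :, :2] - t2[:, :, :2]) ** 2).sum(-1)).sum(0) -> [0., 150.]
# Ball 1 exceeds EPSILON=100, so the two trajectories are considered different.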
class Simulator:
def __init__(self, fps, time_duration, num_substeps=1000, W=448, H=448):
"""
        Class that models the physics simulator
:param fps: frame per second
:param time_duration: simulation time length
:param num_substeps: substeps for simulation accuracy
:param W: Width of image
:param H: Height of image
"""
self.fixed_timestep = 1 / fps
self.nb_steps = time_duration * fps
self.num_substeps = num_substeps
self.p = None
self.W, self.H = W, H
def run(self, pipe, arena, plane_id, colors, rendering=False):
"""
Run the simulator
:param pipe: multiprocess pipe to output the results
:param arena: initial condition
:param plane_id: id of the place for the ground
:param colors: colors list
:param rendering: activate or not the rendering
:return: None
"""
# Initialize the simulator
self.p = bullet_client.BulletClient(pb.DIRECT)
self.p.setAdditionalSearchPath(pybullet_data.getDataPath())
self.p.setGravity(0, 0, -10)
self.p.setPhysicsEngineParameter(fixedTimeStep=self.fixed_timestep, numSolverIterations=10000,
solverResidualThreshold=1e-10,
numSubSteps=self.num_substeps)
        # Init the environment
list_cube = self._init(arena, colors, plane_id)
# Logs
seq_states = np.zeros((self.nb_steps, arena.n_balls, 3 + 4 + 3 + 3))
list_rgb = []
list_depth = []
list_seg = []
# Simulate
for t in range(self.nb_steps):
for i, cube in enumerate(list_cube):
pos, angle = self.p.getBasePositionAndOrientation(cube)
vel_pose, vel_angle = self.p.getBaseVelocity(cube)
seq_states[t, i] = list(pos) + list(angle) + list(vel_pose) + list(vel_angle)
if rendering:
img_arr = self.get_rendering()
rgb = img_arr[2][:, :, :3]
list_depth.append(img_arr[3])
list_seg.append(img_arr[4])
list_rgb.append(rgb)
self.p.stepSimulation()
pipe.send((seq_states, list_rgb, list_depth, list_seg))
pipe.close()
def _init(self, arena, colors, plane_id):
"""
Init the scene with corresponding objects
:param arena: initial condition
:param colors: colors list
:param plane_id: index of the ground texture
:return:
"""
# Load ground
pb.loadURDF(f"../data_generation/urdf/plane_{plane_id}/plane.urdf", useMaximalCoordinates=True)
# Walls
limit = 4.3
angle = (np.pi / 2.)
pb.loadURDF(f"../data_generation/urdf/plane_white/plane.urdf", [0, limit, 0],
pb.getQuaternionFromEuler([angle, 0, 0]), useMaximalCoordinates=True)
pb.loadURDF(f"../data_generation/urdf/plane_white/plane.urdf", [0, -limit, 0],
pb.getQuaternionFromEuler([-angle, 0, 0]), useMaximalCoordinates=True)
pb.loadURDF(f"../data_generation/urdf/plane_white/plane.urdf", [limit, 0, 0],
pb.getQuaternionFromEuler([0, -angle, 0]), useMaximalCoordinates=True)
pb.loadURDF(f"../data_generation/urdf/plane_white/plane.urdf", [-limit, 0, 0],
pb.getQuaternionFromEuler([0, angle, 0]), useMaximalCoordinates=True)
# Add balls
list_balls = []
for i in range(arena.n_balls):
color = colors[i]
x, y = arena.start_position[i]
cube = self.p.loadURDF(f"../data_generation/urdf/{color}/ball.urdf",
[x, y, 0.5],
useMaximalCoordinates=True)
pb.changeDynamics(cube, -1,
mass=arena.confounders[i],
lateralFriction=0,
restitution=1) # Change physical parameters
vx, vy = arena.start_speed[i]
pb.resetBaseVelocity(cube, [vx, vy, 0]) # Change initial speed
list_balls.append(cube)
return list_balls
def get_rendering(self):
""" Rendering of the environment """
viewMatrix = pb.computeViewMatrix([0, 0.01, 8], [0, 0, 0], [0, 0, 1])
projectionMatrix = pb.computeProjectionMatrixFOV(60, self.W / self.H, 4, 20)
img_arr = pb.getCameraImage(self.W, self.H, viewMatrix, projectionMatrix,
shadow=0,
lightDirection=[1, 1, 1],
renderer=pb.ER_BULLET_HARDWARE_OPENGL)
return img_arr
if __name__ == '__main__':
g = Generator(dir_out=args.dir_out, seed=args.seed, n_balls=args.n_balls, nb_examples=args.n_examples)
g.generate()
|
py | 7dfb83e1ce0c568e96ac62e3a9176dc99d86f53d | import subprocess
import time
import pyautogui as pgui
def openBrowser():
    proc = subprocess.Popen('firefox', shell=True)  # Popen (not call) so the script continues while Firefox starts
time.sleep(5)
pgui.hotkey('ctrl', 't')
pgui.hotkey('alt', '1')
if __name__ == '__main__':
openBrowser()
|
py | 7dfb85099e48ce5e43b0c751bcf616755f9bc257 | from aiochan import *
async def pass_on(left, right):
value = await left.get()
await right.put(1 + value)
print(f'Left[{value}] Right[{value + 1}]')
async def main():
n = 6
left = None
rightmost = Chan()
right = rightmost
for _ in range(n):
left = Chan()
go(pass_on(left, right))
right = left
print('Coroutines are waiting')
async def giver(c):
print('Give Gopher1 the initial value')
await c.put(1)
go(giver(left))
print('Final value: ' + str(await rightmost.get()))
if __name__ == '__main__':
run_in_thread(main())
|
py | 7dfb866d41e0786f9c1543165a02f38913ebb8c6 | # coding=utf-8
from prototypical_batch_sampler import PrototypicalBatchSampler
from prototypical_loss import prototypical_loss as loss_fn
from chinadrinks_dataset import ChinadrinkDataset
from protonet import ProtoNet
from parser_util_extract import get_parser
from tensorboardX import SummaryWriter
from tqdm import tqdm
import numpy as np
import torch
import os
import random
import shutil
import pickle
def init_seed(opt):
'''
Disable cudnn to maximize reproducibility
'''
    torch.backends.cudnn.enabled = False
np.random.seed(opt.manual_seed)
torch.manual_seed(opt.manual_seed)
torch.cuda.manual_seed(opt.manual_seed)
def init_dataset(opt, mode, root):
dataset = ChinadrinkDataset(mode=mode, root= root, size = opt.img_size)
n_classes = len(np.unique(dataset.y))
print(n_classes)
#print(dataset.y)
if n_classes < opt.classes_per_it_val:
raise(Exception('There are not enough classes in the dataset in order ' +
'to satisfy the chosen classes_per_it. Decrease the ' +
'classes_per_it_{tr/val} option and try again.'))
return dataset
def init_sampler(opt, labels, mode):
if 'train' in mode:
classes_per_it = opt.classes_per_it_tr
#classes_per_it = 1034
num_samples = opt.num_support_tr + opt.num_query_tr
else:
classes_per_it = opt.classes_per_it_val
#classes_per_it = 434
num_samples = opt.num_support_val + opt.num_query_val
return PrototypicalBatchSampler(labels=labels,
classes_per_it=classes_per_it,
num_samples=num_samples,
iterations=opt.iterations)
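# --- Added note (illustrative numbers) ---
# Each episode samples `classes_per_it` classes and `num_samples` images per
# class, where num_samples = n_support + n_query. For example, with
# classes_per_it_tr=60, num_support_tr=5 and num_query_tr=5, one iteration
# yields 60 classes x 10 images.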
def init_dataloader(opt, mode, root):
dataset = init_dataset(opt, mode, root)
#labels = [int(x) for x in dataset.y]
sampler = init_sampler(opt, dataset.y, mode)
dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)
torch.cuda.empty_cache()
return dataloader
def init_protonet(opt, pretrained_file= "", pretrained = False):
'''
Initialize the ProtoNet
'''
device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
model = ProtoNet().to(device)
if(pretrained):
model.load_state_dict(torch.load(pretrained_file))
print("Loaded pre-trained model")
return model
def train(opt, tr_dataloader, model):
'''
    Run a forward pass over one training batch and return the embeddings and labels
'''
#writer = SummaryWriter('/home/caffe/orbix/Prototypical-Networks-for-Few-shot-Learning-PyTorch/logs/Chinadrink_Protonet_22_dropout')
device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
for epoch in range(opt.epochs):
torch.cuda.empty_cache()
print('=== Epoch: {} ==='.format(epoch))
tr_iter = iter(tr_dataloader)
model.train()
torch.cuda.empty_cache()
for batch in tqdm(tr_iter):
#optim.zero_grad()
x, y = batch
x, y = x.to(device), y.to(device)
model_output = model(x)
return model_output,y
def test(opt, test_dataloader, model):
'''
    Run a forward pass over one test batch and return the embeddings and labels
'''
device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'
#writer = SummaryWriter('/home/caffe/orbix/Prototypical-Networks-for-Few-shot-Learning-PyTorch/logs/Chinadrink_Protonet_22_dropout')
avg_acc = list()
for epoch in range(10):
test_iter = iter(test_dataloader)
for batch in test_iter:
x, y = batch
x, y = x.to(device), y.to(device)
model_output = model(x)
return model_output,y
def eval(opt, test_dataloader):
    '''
    Load the best saved model and evaluate it on the provided test dataloader
    '''
    model = init_protonet(opt)
    model_path = os.path.join(opt.experiment_root, 'best_model.pth')
    model.load_state_dict(torch.load(model_path))
    test(opt=opt,
         test_dataloader=test_dataloader,
         model=model)
def main():
'''
    Initialize everything and extract features with a pre-trained model
'''
options = get_parser().parse_args()
if not os.path.exists(options.experiment_root):
os.makedirs(options.experiment_root)
if torch.cuda.is_available() and not options.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
init_seed(options)
#dataset_root = options.dataset_root
train_folder ='/home/caffe/data/chinadrink_prod_train'
test_folder = '/home/caffe/data/chinadrink_test/all_cropped_images/'
filepath = '/home/caffe/orbix/Prototypical-Networks-for-Few-shot-Learning-PyTorch/output/best_model_rgb28.pth'
train_features_file = '/home/caffe/data/chinadrink'
test_features_file = '/home/caffe/data/chinadrink'
'''
tr_dataloader = init_dataloader(options, 'train', root = train_folder)
'''
model = init_protonet(opt = options,pretrained_file = filepath, pretrained = True)
'''
train_features, train_labels = train(opt=options,
tr_dataloader=tr_dataloader,
model=model)
#save train features
np.save(train_features_file+'train_features_rgb', train_features.cpu().detach().numpy())
#save train labels
with open(train_features_file+'train_labels_rgb.pkl','wb') as f:
pickle.dump(train_labels.cpu(),f)
print('Loaded train features/labels')
'''
test_dataloader = init_dataloader(options, 'test', root = test_folder)
test_features, test_labels = test(opt=options,
test_dataloader=test_dataloader,
model=model)
print(test_features.size())
#save test features
np.save(test_features_file+'test_features_rgb', test_features.cpu().detach().numpy())
#save test labels
with open(test_features_file+'test_labels_rgb.pkl','wb') as f:
pickle.dump(test_labels.cpu(),f)
if __name__ == '__main__':
main()
|
py | 7dfb86a7a985f2da78c0ab13f2e35d6842a89b7e | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import Resample
from monai.transforms.utils import create_grid
from tests.utils import TEST_NDARRAYS, assert_allclose
TESTS = []
for p in TEST_NDARRAYS:
for q in TEST_NDARRAYS:
for device in [None, "cpu", "cuda"] if torch.cuda.is_available() else [None, "cpu"]:
TESTS.append(
[
dict(padding_mode="zeros", device=device),
{"grid": p(create_grid((2, 2))), "img": q(np.arange(4).reshape((1, 2, 2)))},
q(np.array([[[0.0, 1.0], [2.0, 3.0]]])),
]
)
TESTS.append(
[
dict(padding_mode="zeros", device=device),
{"grid": p(create_grid((4, 4))), "img": q(np.arange(4).reshape((1, 2, 2)))},
q(
np.array(
[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 2.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
)
),
]
)
TESTS.append(
[
dict(padding_mode="border", device=device),
{"grid": p(create_grid((4, 4))), "img": q(np.arange(4).reshape((1, 2, 2)))},
q(
np.array(
[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 3, 3.0], [2.0, 2.0, 3.0, 3.0]]]
)
),
]
)
TESTS.append(
[
dict(padding_mode="reflection", device=device),
{"grid": p(create_grid((4, 4))), "img": q(np.arange(4).reshape((1, 2, 2))), "mode": "nearest"},
q(
np.array(
[[[3.0, 2.0, 3.0, 2.0], [1.0, 0.0, 1.0, 0.0], [3.0, 2.0, 3.0, 2.0], [1.0, 0.0, 1.0, 0.0]]]
)
),
]
)
TESTS.append(
[
dict(padding_mode="zeros", device=device),
{
"grid": p(create_grid((4, 4, 4))),
"img": q(np.arange(8).reshape((1, 2, 2, 2))),
"mode": "bilinear",
},
q(
np.array(
[
[
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 2.0, 3.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 4.0, 5.0, 0.0],
[0.0, 6.0, 7.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
]
]
)
),
]
)
TESTS.append(
[
dict(padding_mode="border", device=device),
{
"grid": p(create_grid((4, 4, 4))),
"img": q(np.arange(8).reshape((1, 2, 2, 2))),
"mode": "bilinear",
},
q(
np.array(
[
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[2.0, 2.0, 3.0, 3.0],
[2.0, 2.0, 3.0, 3.0],
],
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[2.0, 2.0, 3.0, 3.0],
[2.0, 2.0, 3.0, 3.0],
],
[
[4.0, 4.0, 5.0, 5.0],
[4.0, 4.0, 5.0, 5.0],
[6.0, 6.0, 7.0, 7.0],
[6.0, 6.0, 7.0, 7.0],
],
[
[4.0, 4.0, 5.0, 5.0],
[4.0, 4.0, 5.0, 5.0],
[6.0, 6.0, 7.0, 7.0],
[6.0, 6.0, 7.0, 7.0],
],
]
]
)
),
]
)
class TestResample(unittest.TestCase):
@parameterized.expand(TESTS)
def test_resample(self, input_param, input_data, expected_val):
g = Resample(**input_param)
result = g(**input_data)
if "device" in input_data:
self.assertEqual(result.device, input_data["device"])
assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
unittest.main()
|
py | 7dfb870d895087c7be226f3f2545f1525768d5fa | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import re
import subprocess # nosec
import sys
import yaml
# NOTE(SamYaple): Update the search path to prefer PROJECT_ROOT as the source
# of packages to import if we are using local tools instead of
# pip installed kolla tools
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..'))
if PROJECT_ROOT not in sys.path:
sys.path.insert(0, PROJECT_ROOT)
from kolla.common import config # noqa
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
RELEASE_REPO = 'https://github.com/openstack/releases'
TARGET = '.releases'
SKIP_PROJECTS = {
'crane': 'Crane is not managed by openstack/releases project',
'gnocchi-base': 'Gnocchi is not managed by openstack/releases project',
'monasca-thresh': 'Package not published in tarballs.openstack.org',
'rally': 'Rally is not managed by openstack/releases project',
}
RE_DEFAULT_BRANCH = re.compile('^defaultbranch=stable/(.*)')
RE_FILENAME = re.compile('(?P<project_name>.*)-(?P<tag>[^-]*).tar.gz')
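# Added example (illustrative): RE_FILENAME splits a tarball name on its last '-':
#   RE_FILENAME.match('blazar-nova-1.0.0.tar.gz').groups() -> ('blazar-nova', '1.0.0')
# Note the dots in '.tar.gz' are unescaped, so they match any character.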
def update_releases_repo():
if not os.path.exists(TARGET):
cmd = ['git', 'clone', RELEASE_REPO, TARGET]
else:
cmd = ['git', '--git-dir', os.path.join(TARGET, '.git'), '--work-tree',
TARGET, 'pull']
subprocess.call(cmd) # nosec
def get_default_branch():
gitreview_file = os.path.join(PROJECT_ROOT, '.gitreview')
if not os.path.exists(gitreview_file):
return
with open(gitreview_file, 'r') as gitreview:
for line in gitreview:
branches = RE_DEFAULT_BRANCH.findall(line)
if branches:
return branches[0]
def load_all_info(openstack_release):
projects = {}
release_path = os.path.join(TARGET, 'deliverables', openstack_release)
if not os.path.exists(release_path):
raise ValueError(
'Can not find openstack release: "%s"' % openstack_release)
for deliverable in os.listdir(release_path):
if not deliverable.endswith('.yaml'):
continue
with open(os.path.join(release_path, deliverable)) as f:
info = yaml.safe_load(f)
if 'releases' in info and len(info['releases']) > 0:
latest_release = info['releases'][-1]
latest_version = latest_release['version']
for project in latest_release['projects']:
project_name = project['repo'].split('/')[-1]
if 'tarball-base' in project:
tarball_base = project['tarball-base']
elif 'repository-settings' in info:
try:
repo = project['repo']
repository_settings = info['repository-settings'][repo]
tarball_base = repository_settings['tarball-base']
except KeyError:
tarball_base = project_name
projects[project_name] = {'latest_version': latest_version,
'tarball_base': tarball_base}
projects[tarball_base] = {'latest_version': latest_version,
'tarball_base': tarball_base}
return projects
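# --- Added sketch: the minimal deliverable shape load_all_info() reads ---
# Only the keys used above are shown; the values are made up.
#   releases:
#     - version: 1.2.3
#       projects:
#         - repo: openstack/example
# The latest entry's 'version' becomes latest_version; a 'tarball-base' (on the
# project or under 'repository-settings') overrides the repo basename.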
def main():
parser = argparse.ArgumentParser(
description='Check and update OpenStack service version.')
parser.add_argument('--openstack-release', '-r',
default=get_default_branch(),
help='OpenStack release name')
parser.add_argument('--include-independent', '-i',
default=False, action='store_true',
help='Whether update independent projects')
parser.add_argument('--check', '-c',
default=False, action='store_true',
help='Run without update config.py file')
conf = parser.parse_args(sys.argv[1:])
if not conf.openstack_release:
raise ValueError('Can not detect openstack release. Please assign'
' it through "--openstack-release" parameter')
LOG.info('Update using openstack release: "%s"', conf.openstack_release)
if conf.check:
LOG.info('Run in check only mode')
update_releases_repo()
projects = load_all_info(openstack_release=conf.openstack_release)
independents_projects = load_all_info(openstack_release='_independent')
with open(os.path.join(PROJECT_ROOT, 'kolla/common/config.py')) as f:
config_py = f.read()
for key in sorted(config.SOURCES):
independent_project = False
value = config.SOURCES[key]
if key in SKIP_PROJECTS:
LOG.info('%s is skipped: %s', key, SKIP_PROJECTS[key])
continue
# get project name from location
location = value['location']
filename = os.path.basename(location)
match = RE_FILENAME.match(filename)
if match:
project_name, old_tag = match.groups()
else:
raise ValueError('Can not parse "%s"' % filename)
if project_name == "requirements":
# Use the stable branch for requirements.
latest_tag = "stable-{}".format(conf.openstack_release)
tarball_base = project_name
elif project_name in projects:
latest_tag = projects[project_name]['latest_version']
tarball_base = projects[project_name]['tarball_base']
elif project_name in independents_projects:
latest_tag = independents_projects[project_name]['latest_version']
tarball_base = independents_projects[project_name]['tarball_base']
independent_project = True
else:
LOG.warning('Can not find %s project release',
project_name)
continue
if latest_tag and old_tag != latest_tag:
if independent_project and not conf.include_independent:
LOG.warning('%s is an independent project, please update it'
' manually. Possible need upgrade from %s to %s',
project_name, old_tag, latest_tag)
continue
LOG.info('Update %s from %s to %s %s', project_name, old_tag,
tarball_base, latest_tag)
# starting "'" to replace whole filenames not partial ones
# so nova does not change blazar-nova
old_str = "'{}-{}".format(project_name, old_tag)
new_str = "'{}-{}".format(tarball_base, latest_tag)
config_py = config_py.replace(old_str, new_str)
if not conf.check:
with open(os.path.join(PROJECT_ROOT, 'kolla/common/config.py'),
'w') as f:
f.write(config_py)
if __name__ == '__main__':
main()
|
py | 7dfb87541ca8d27c649cb16cc1399a19a02d70a8 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import os
import posixpath
from pyiron_atomistics.atomistics.structure.atoms import CrystalStructure
from pyiron_atomistics.vasp.base import Input, Output
from pyiron_atomistics import Project
from pyiron_base import state, ProjectHDFio
from pyiron_atomistics.vasp.potential import VaspPotentialSetter
from pyiron_atomistics.vasp.vasp import Vasp
from pyiron_atomistics.vasp.metadyn import VaspMetadyn
from pyiron_atomistics.vasp.structure import read_atoms
import numpy as np
import warnings
__author__ = "Sudarsan Surendralal"
class TestVasp(unittest.TestCase):
"""
Tests the pyiron_atomistics.objects.hamilton.dft.vasp.Vasp class
"""
@classmethod
def setUpClass(cls):
state.update({'resource_paths': os.path.join(os.path.dirname(os.path.abspath(__file__)), "../static")})
cls.execution_path = os.path.dirname(os.path.abspath(__file__))
cls.project = Project(os.path.join(cls.execution_path, "test_vasp"))
cls.job = cls.project.create_job("Vasp", "trial")
cls.job_spin = cls.project.create_job("Vasp", "spin")
cls.job_spin.structure = CrystalStructure("Fe", BravaisBasis="bcc", a=2.83)
cls.job_spin.structure = cls.job_spin.structure.repeat(2)
cls.job_spin.structure[2] = "Se"
cls.job_spin.structure[3] = "O"
cls.job_metadyn = cls.project.create_job("VaspMetadyn", "trial_metadyn")
cls.job_complete = Vasp(
project=ProjectHDFio(project=cls.project, file_name="vasp_complete"),
job_name="vasp_complete",
)
poscar_file = posixpath.join(
cls.execution_path, "../static/vasp_test_files/full_job_sample/POSCAR"
)
cls.job_complete.structure = read_atoms(poscar_file, species_from_potcar=True)
poscar_file = posixpath.join(
cls.execution_path, "../static/vasp_test_files/poscar_samples/POSCAR_metadyn"
)
cls.job_metadyn.structure = read_atoms(poscar_file)
@classmethod
def tearDownClass(cls):
cls.execution_path = os.path.dirname(os.path.abspath(__file__))
project = Project(os.path.join(cls.execution_path, "test_vasp"))
project.remove_jobs_silently(recursive=True)
project.remove(enable=True)
state.update()
def setUp(self):
self.job.structure = None
def test_list_potentials(self):
self.assertRaises(ValueError, self.job.list_potentials)
self.assertEqual(sorted([
'Fe', 'Fe_GW', 'Fe_pv', 'Fe_sv', 'Fe_sv_GW', 'Se', 'Se_GW',
'O', 'O_GW', 'O_GW_new', 'O_h', 'O_s', 'O_s_GW'
]), sorted(self.job_spin.list_potentials()))
self.assertEqual(
sorted(['Fe', 'Fe_GW', 'Fe_pv', 'Fe_sv', 'Fe_sv_GW']),
sorted(self.job_complete.list_potentials())
)
self.job_spin.potential["Fe"] = 'Fe_sv_GW'
self.job_complete.potential.Fe = 'Fe_sv_GW'
self.assertEqual('Fe_sv_GW', list(self.job_spin.potential.to_dict().values())[0])
self.assertEqual('Fe_sv_GW', list(self.job_complete.potential.to_dict().values())[0])
self.job_complete.potential["Fe"] = 'Fe'
self.job_spin.potential.Fe = 'Fe'
def test_init(self):
self.assertEqual(self.job.__name__, "Vasp")
self.assertEqual(self.job._sorted_indices, None)
self.assertIsInstance(self.job.input, Input)
self.assertIsInstance(self.job._output_parser, Output)
self.assertIsInstance(self.job._potential, VaspPotentialSetter)
self.assertTrue(self.job._compress_by_default)
self.assertEqual(self.job.get_eddrmm_handling(), "warn")
self.assertIsInstance(self.job_metadyn, Vasp)
self.assertIsInstance(self.job_metadyn, VaspMetadyn)
self.assertTrue(self.job_metadyn.input.incar["LBLUEOUT"])
def test_eddrmm(self):
self.job.set_eddrmm_handling("ignore")
self.assertEqual(self.job.get_eddrmm_handling(), "ignore")
self.job.set_eddrmm_handling("restart")
self.assertEqual(self.job.get_eddrmm_handling(), "restart")
self.job.set_eddrmm_handling()
self.assertEqual(self.job.get_eddrmm_handling(), "warn")
self.assertRaises(ValueError, self.job.set_eddrmm_handling, status="blah")
def test_rwigs(self):
rwigs_dict = {"Fe": 1.1, "Se": 2.2, "O": 3.3, "N": 4.4}
rwigs_dict_wrong_1 = {"Fe": "not a float", "Se": 2.2, "O": 3.3, "N": 4.4}
rwigs_dict_wrong_2 = {"Fe": 1.1}
self.assertIsNone(self.job_spin.get_rwigs())
self.assertRaises(AssertionError, self.job_spin.set_rwigs, rwigs_dict="not a dict")
self.assertRaises(ValueError, self.job_spin.set_rwigs, rwigs_dict=rwigs_dict_wrong_1)
self.assertRaises(ValueError, self.job_spin.set_rwigs, rwigs_dict=rwigs_dict_wrong_2)
self.job_spin.set_rwigs(rwigs_dict)
rwigs_dict_out = self.job_spin.get_rwigs()
for key in rwigs_dict_out.keys():
self.assertEqual(rwigs_dict_out[key], rwigs_dict[key])
def test_spin_constraints(self):
self.job_spin.spin_constraints = 1
self.assertTrue(self.job_spin.spin_constraints)
self.job_spin.spin_constraints = 2
self.assertTrue(self.job_spin.spin_constraints)
del self.job_spin.input.incar["I_CONSTRAINED_M"]
self.assertFalse(self.job_spin.spin_constraints)
def test_spin_constraint(self):
rwigs_dict = {"Fe": 1.1, "Se": 2.2, "O": 3.3, "N": 4.4}
self.assertRaises(
AssertionError,
self.job_spin.set_spin_constraint,
lamb=0.5,
rwigs_dict=rwigs_dict,
direction="not a bool",
norm=False
)
self.assertRaises(
AssertionError,
self.job_spin.set_spin_constraint,
lamb=0.5,
rwigs_dict=rwigs_dict,
direction=True,
norm="not a bool"
)
self.assertRaises(
AssertionError,
self.job_spin.set_spin_constraint,
lamb="not a float",
rwigs_dict=rwigs_dict,
direction=True,
norm=False
)
self.assertRaises(
ValueError,
self.job_spin.set_spin_constraint,
lamb=0.5,
rwigs_dict=rwigs_dict,
direction=False,
norm=False
)
self.assertRaises(
ValueError,
self.job_spin.set_spin_constraint,
lamb=0.5,
rwigs_dict=rwigs_dict,
direction=False,
norm=True
)
self.job_spin.set_spin_constraint(lamb=0.5, rwigs_dict=rwigs_dict, direction=True, norm=False)
self.assertEqual(self.job_spin.input.incar["LAMBDA"], 0.5)
self.assertEqual(self.job_spin.input.incar["I_CONSTRAINED_M"], 1)
rwigs_dict_out = self.job_spin.get_rwigs()
for key in rwigs_dict_out.keys():
self.assertEqual(rwigs_dict_out[key], rwigs_dict[key])
self.job_spin.set_spin_constraint(lamb=0.5, rwigs_dict=rwigs_dict, direction=True, norm=True)
self.assertEqual(self.job_spin.input.incar["I_CONSTRAINED_M"], 2)
def test_potential(self):
self.assertEqual(self.job.potential, self.job._potential)
def test_plane_wave_cutoff(self):
self.assertIsInstance(self.job.plane_wave_cutoff, (float, int, type(None)))
# self.assertIsInstance(self.job.plane_wave_cutoff, (float, int))
self.job.plane_wave_cutoff = 350
self.assertEqual(self.job.input.incar["ENCUT"], 350)
self.assertEqual(self.job.plane_wave_cutoff, 350)
self.assertEqual(self.job.plane_wave_cutoff, self.job.encut)
self.job.encut = 450
self.assertEqual(self.job.encut, 450)
self.assertEqual(self.job.input.incar["ENCUT"], 450)
self.assertEqual(self.job.plane_wave_cutoff, 450)
def test_exchange_correlation_functional(self):
self.assertEqual(self.job.exchange_correlation_functional, "GGA")
self.assertEqual(self.job.input.potcar["xc"], "GGA")
self.job.exchange_correlation_functional = "LDA"
self.assertEqual(self.job.exchange_correlation_functional, "LDA")
self.assertEqual(self.job.input.potcar["xc"], "LDA")
def test_get_nelect(self):
atoms = CrystalStructure("Pt", BravaisBasis="fcc", a=3.98)
self.job.structure = atoms
self.assertEqual(self.job.get_nelect(), 10)
def test_write_magmoms(self):
magmom = np.arange(8.)
magmom_ncl = np.zeros([8, 3])
magmom_ncl[:, 0] = magmom / 2
magmom_ncl[:, 1] = magmom
magmom_ncl[:, 2] = magmom ** 2
magmom_str = "0.0 1.0 2.0 3.0 4.0 5.0 6.0 7.0"
magmom_ncl_str =\
"0.0 0.0 0.0 0.5 1.0 1.0 1.0 2.0 4.0 1.5 3.0 9.0 " \
"2.0 4.0 16.0 2.5 5.0 25.0 3.0 6.0 36.0 3.5 7.0 49.0"
self.job.structure = CrystalStructure("Fe", BravaisBasis="bcc", a=2.83)
self.job.structure = self.job.structure.repeat(2)
self.job.structure.set_initial_magnetic_moments(magmom)
self.job.input.incar["ISPIN"] = 1
self.job.write_magmoms()
self.assertIsNone(self.job.input.incar["MAGMOM"])
self.assertEqual(self.job.input.incar["ISPIN"], 1)
del self.job.input.incar["ISPIN"]
self.job.write_magmoms()
self.assertEqual(self.job.input.incar["ISPIN"], 2)
self.assertEqual(self.job.input.incar["MAGMOM"], magmom_str)
del self.job.input.incar["MAGMOM"]
self.job.structure.set_initial_magnetic_moments(magmom_ncl)
self.job.set_spin_constraint(lamb=1.0, rwigs_dict={"Fe": 2.5}, direction=True, norm=True)
self.job.write_magmoms()
self.assertEqual(self.job.input.incar["LNONCOLLINEAR"], True)
self.assertEqual(self.job.input.incar["MAGMOM"], magmom_ncl_str)
self.assertEqual(self.job.input.incar["M_CONSTR"], magmom_ncl_str)
del self.job.input.incar["MAGMOM"]
del self.job.input.incar["M_CONSTR"]
del self.job.input.incar["LNONCOLLINEAR"]
del self.job.input.incar["RWIGS"]
self.assertRaises(ValueError, self.job.write_magmoms)
self.job.input.incar["RWIGS"] = "2.5"
del self.job.input.incar["LAMBDA"]
self.assertRaises(ValueError, self.job.write_magmoms)
def test_set_empty_states(self):
atoms = CrystalStructure("Pt", BravaisBasis="fcc", a=3.98)
self.job.structure = atoms
self.job.set_empty_states(n_empty_states=10)
self.assertEqual(self.job.input.incar["NBANDS"], 15)
self.job.structure = atoms.repeat([3, 1, 1])
self.job.set_empty_states(n_empty_states=10)
self.assertEqual(self.job.input.incar["NBANDS"], 25)
def test_set_occpuancy_smearing(self):
job_smear = self.project.create_job("Vasp", "smearing")
self.assertIsNone(job_smear.input.incar["ISMEAR"])
self.assertIsNone(job_smear.input.incar["SIGMA"])
job_smear.set_occupancy_smearing(smearing="methfessel_paxton")
self.assertEqual(job_smear.input.incar["ISMEAR"], 1)
job_smear.set_occupancy_smearing(smearing="methfessel_paxton", order=2)
self.assertEqual(job_smear.input.incar["ISMEAR"], 2)
job_smear.set_occupancy_smearing(smearing="Fermi", width=0.1)
self.assertEqual(job_smear.input.incar["ISMEAR"], -1)
self.assertEqual(job_smear.input.incar["SIGMA"], 0.1)
job_smear.set_occupancy_smearing(smearing="Gaussian", width=0.1)
self.assertEqual(job_smear.input.incar["ISMEAR"], 0)
self.assertEqual(job_smear.input.incar["SIGMA"], 0.1)
with warnings.catch_warnings(record=True) as w:
job_smear.set_occupancy_smearing(smearing="Gaussian", ismear=10)
self.assertEqual(job_smear.input.incar["ISMEAR"], 10)
self.assertEqual(len(w), 1)
self.assertRaises(ValueError, job_smear.set_occupancy_smearing, smearing="gibberish")
def test_calc_static(self):
self.job.calc_static(
electronic_steps=90,
retain_charge_density=True,
retain_electrostatic_potential=True,
)
self.assertEqual(self.job.input.incar["IBRION"], -1)
self.assertEqual(self.job.input.incar["NELM"], 90)
self.assertEqual(self.job.input.incar["LVTOT"], True)
self.assertEqual(self.job.input.incar["LCHARG"], True)
def test_set_structure(self):
self.assertEqual(self.job.structure, None)
atoms = CrystalStructure("Pt", BravaisBasis="fcc", a=3.98)
self.job.structure = atoms
self.assertEqual(self.job.structure, atoms)
self.job.structure = None
self.assertEqual(self.job.structure, None)
self.job.structure = atoms
self.assertEqual(self.job.structure, atoms)
def test_run_complete(self):
self.job_complete.exchange_correlation_functional = "PBE"
self.job_complete.set_occupancy_smearing(smearing="fermi", width=0.2)
self.job_complete.calc_static()
self.job_complete.set_convergence_precision(electronic_energy=1e-7)
self.job_complete.write_electrostatic_potential = False
self.assertEqual(self.job_complete.input.incar["SIGMA"], 0.2)
self.assertEqual(self.job_complete.input.incar["LVTOT"], False)
self.assertEqual(self.job_complete.input.incar["EDIFF"], 1e-7)
file_directory = posixpath.join(
self.execution_path, "../static/vasp_test_files/full_job_sample"
)
self.job_complete.restart_file_list.append(
posixpath.join(file_directory, "vasprun.xml")
)
self.job_complete.restart_file_list.append(
posixpath.join(file_directory, "OUTCAR")
)
self.job_complete.restart_file_list.append(
posixpath.join(file_directory, "CHGCAR")
)
self.job_complete.restart_file_list.append(
posixpath.join(file_directory, "WAVECAR")
)
self.job_complete.run(run_mode="manual")
self.job_complete.status.collect = True
self.job_complete.run()
nodes = [
"positions",
"temperature",
"energy_tot",
"steps",
"positions",
"forces",
"cells",
"pressures",
]
with self.job_complete.project_hdf5.open("output/generic") as h_gen:
hdf_nodes = h_gen.list_nodes()
self.assertTrue(all([node in hdf_nodes for node in nodes]))
nodes = [
"energy_free",
"energy_int",
"energy_zero",
"final_magmoms",
"magnetization",
"n_elect",
"scf_dipole_mom",
"scf_energy_free",
"scf_energy_int",
"scf_energy_zero",
]
with self.job_complete.project_hdf5.open("output/generic/dft") as h_dft:
hdf_nodes = h_dft.list_nodes()
self.assertTrue(all([node in hdf_nodes for node in nodes]))
nodes = ["efermi", "eig_matrix", "k_points", "k_weights", "occ_matrix"]
with self.job_complete.project_hdf5.open(
"output/electronic_structure"
) as h_dft:
hdf_nodes = h_dft.list_nodes()
self.assertTrue(all([node in hdf_nodes for node in nodes]))
job_chg_den = self.job_complete.restart_from_charge_density(job_name="chg")
self.assertEqual(job_chg_den.structure, self.job_complete.get_structure(-1))
self.assertTrue(
posixpath.join(self.job_complete.working_directory, "CHGCAR")
in job_chg_den.restart_file_list
)
def check_group_is_empty(example_job, group_name):
with example_job.project_hdf5.open(group_name) as h_gr:
self.assertTrue(h_gr.list_nodes() == [])
self.assertTrue(h_gr.list_groups() == [])
check_group_is_empty(job_chg_den, "output")
job_chg_wave = self.job_complete.restart_from_wave_and_charge(
job_name="chg_wave"
)
self.assertEqual(job_chg_wave.structure, self.job_complete.get_structure(-1))
self.assertTrue(
posixpath.join(self.job_complete.working_directory, "WAVECAR")
in job_chg_wave.restart_file_list
)
self.assertTrue(
posixpath.join(self.job_complete.working_directory, "CHGCAR")
in job_chg_wave.restart_file_list
)
for key, val in job_chg_wave.restart_file_dict.items():
self.assertTrue(key, val)
check_group_is_empty(job_chg_wave, "output")
job = self.job_complete.restart()
job.restart_file_list.append(
posixpath.join(file_directory, "vasprun.xml")
)
job.restart_file_list.append(
posixpath.join(file_directory, "OUTCAR")
)
job.run(run_mode="manual")
job.status.collect = True
job.run()
# Check if error raised if the files don't exist
self.assertRaises(FileNotFoundError, job.restart_from_wave_functions, "wave_restart")
self.assertRaises(FileNotFoundError, job.restart_from_charge_density, "chg_restart")
self.assertRaises(FileNotFoundError, job.restart_from_wave_and_charge, "wave_chg_restart")
def test_vasp_metadyn(self):
self.job_metadyn.set_primitive_constraint("bond_1", "bond", atom_indices=[0, 2], increment=1e-4)
self.job_metadyn.set_primitive_constraint("bond_2", "bond", atom_indices=[0, 3], increment=1e-4)
self.job_metadyn.set_complex_constraint("combine", "linear_combination", {"bond_1": 1, "bond_2": -1},
increment=1e-4)
self.job_metadyn.write_constraints()
constraints = self.job_metadyn.input.iconst._dataset["Value"]
for val in ['R 1 6 0', 'R 1 2 0', 'S 1 -1 0']:
self.assertTrue(val in constraints)
def test_setting_input(self):
self.job.set_convergence_precision(electronic_energy=1e-7, ionic_force_tolerance=0.1)
self.assertEqual(self.job.input.incar["EDIFF"], 1e-7)
self.assertEqual(self.job.input.incar["EDIFFG"], -0.1)
self.job.calc_minimize()
self.assertEqual(self.job.input.incar["EDIFFG"], -0.01)
self.job.calc_minimize(ionic_energy=1e-4)
self.assertEqual(self.job.input.incar["EDIFFG"], 0.0001)
self.job.calc_minimize(ionic_forces=1e-3)
self.assertEqual(self.job.input.incar["EDIFFG"], -0.001)
self.assertEqual(self.job.input.incar["EDIFF"], 1e-7)
def test_mixing_parameter(self):
job = self.project.create_job('Vasp', 'mixing_parameter')
job.set_mixing_parameters(density_mixing_parameter=0.1)
self.assertEqual(job.input.incar['IMIX'], 4)
with self.assertRaises(NotImplementedError):
job.set_mixing_parameters(density_residual_scaling=0.1)
def test_potentials(self):
# Assert that no warnings are raised
with warnings.catch_warnings(record=True) as w:
structure = self.project.create_ase_bulk("Al", cubic=True)
element = self.project.create_element(new_element_name='Al_GW', parent_element="Al", potential_file='Al_GW')
structure[:] = element
job = self.project.create.job.Vasp("test")
job.structure = structure
job.run(run_mode="manual")
self.assertEqual(len(w), 0)
def test_kspacing(self):
job_kspace = self.project.create_job("Vasp", "job_kspacing")
job_kspace.structure = self.project.create_ase_bulk("Fe")
job_kspace.input.incar["KSPACING"] = 0.5
with warnings.catch_warnings(record=True) as w:
job_kspace.run(run_mode="manual")
self.assertNotIn("KPOINTS", job_kspace.list_files(), "'KPOINTS' file written even when "
"KPACING tag is present in INCAR")
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message), "'KSPACING' found in INCAR, no KPOINTS file written")
if __name__ == "__main__":
unittest.main()
|
py | 7dfb87934c3d1c89cac6632ff0be21a9e93d8806 | """blogger URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('posts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
]
|
py | 7dfb87a4706a1bb6947f69d3a4f8a77626a56cf9 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True if the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
py | 7dfb8809267629fed27d78df6d05d3c8d1a6996d | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import time
from frappe import _, msgprint
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock
from frappe.utils.background_jobs import enqueue
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields
from frappe.utils.file_manager import save_url
# once_only validation
# methods
def get_doc(arg1, arg2=None):
"""returns a frappe.model.Document object.
:param arg1: Document dict or DocType name.
:param arg2: [optional] document name.
There are two ways to call `get_doc`
# will fetch the latest user object (with child table) from the database
user = get_doc("User", "[email protected]")
# create a new object
user = get_doc({
"doctype":"User"
"email_id": "[email protected]",
"user_roles: [
{"role": "System Manager"}
]
})
"""
if isinstance(arg1, BaseDocument):
return arg1
elif isinstance(arg1, basestring):
doctype = arg1
else:
doctype = arg1.get("doctype")
controller = get_controller(doctype)
if controller:
return controller(arg1, arg2)
raise ImportError, arg1
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, arg1, arg2=None):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_docs = {}
self.flags = frappe._dict()
if arg1 and isinstance(arg1, basestring):
if not arg2:
# single
self.doctype = self.name = arg1
else:
self.doctype = arg1
if isinstance(arg2, dict):
# filter
self.name = frappe.db.get_value(arg1, arg2, "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(arg1), arg2), frappe.DoesNotExistError)
else:
self.name = arg2
self.load_from_db()
elif isinstance(arg1, dict):
super(Document, self).__init__(arg1)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise frappe.DataError("Document({0}, {1})".format(arg1, arg2))
def reload(self):
"""Reload document from database"""
self.load_from_db()
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
super(Document, self).__init__(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlabel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlabel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
if self.flags.ignore_permissions:
return True
return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
def raise_no_permission_to(self, perm_type):
"""Raise `frappe.PermissionError`."""
msg = _("No permission to {0} {1} {2}".format(perm_type, self.doctype, self.name or ""))
frappe.msgprint(msg)
raise frappe.PermissionError(msg)
def insert(self, ignore_permissions=None):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.email_alerts_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
self.set("__islocal", True)
self.check_permission("create")
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.run_method("before_insert")
self.set_new_name()
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
self.db_insert()
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr(self, "__islocal"):
delattr(self, "__islocal")
return self
def save(self, *args, **kwargs):
"""Wrapper for _save"""
return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None):
"""Save the current document in the database in the **DocType**'s table or
`tabSingles` (for single types).
This will check for user permissions and execute
`validate` before updating, `on_update` after updating triggers.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.email_alerts_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if self.get("__islocal") or not self.get("name"):
self.insert()
return
self.check_permission("write", "save")
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
self.set_docstatus()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
self.update_children()
self.run_post_save_methods()
return self
def copy_attachments_from_amended_from(self):
'''Copy attachments from `amended_from`'''
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
save_url(attach_item.file_url, attach_item.file_name, self.doctype, self.name, "Home/Attachments", attach_item.is_private)
def update_children(self):
'''update child tables'''
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
def update_child_table(self, fieldname, df=None):
'''sync child table for given fieldname'''
rows = []
if not df:
df = self.meta.get_field(fieldname)
for d in self.get(df.fieldname):
d.db_update()
rows.append(d.name)
if df.options in (self.flags.ignore_children_type or []):
# do not delete rows for this because of flags
# hack for docperm :(
return
if rows:
# select rows that do not match the ones in the document
deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s
and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
[self.name, self.doctype, fieldname] + rows)
if len(deleted_rows) > 0:
# delete rows that do not match the ones in the document
frappe.db.sql("""delete from `tab{0}` where name in ({1})""".format(df.options,
','.join(['%s'] * len(deleted_rows))), tuple(row[0] for row in deleted_rows))
else:
# no rows found, delete all rows
frappe.db.sql("""delete from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s""".format(df.options),
(self.name, self.doctype, fieldname))
def set_new_name(self):
"""Calls `frappe.naming.se_new_name` for parent and child docs."""
set_new_name(self)
# set name for children
for d in self.get_all_children():
set_new_name(d)
def set_title_field(self):
"""Set title field based on template"""
def get_values():
values = self.as_dict()
# format values
for key, value in values.iteritems():
if value==None:
values[key] = ""
return values
if self.meta.get("title_field")=="title":
df = self.meta.get_field(self.meta.title_field)
if df.options:
self.set(df.fieldname, df.options.format(**get_values()))
elif self.is_new() and not self.get(df.fieldname) and df.default:
# set default title for new transactions (if default)
self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
"""Updates values for Single type Document in `tabSingles`."""
frappe.db.sql("""delete from tabSingles where doctype=%s""", self.doctype)
for field, value in d.iteritems():
if field != "doctype":
frappe.db.sql("""insert into tabSingles(doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
if self.doctype in frappe.db.value_cache:
del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
def _validate(self):
self._validate_mandatory()
self._validate_links()
self._validate_selects()
self._validate_constants()
self._validate_length()
self._extract_images_from_text_editor()
self._sanitize_content()
self._save_passwords()
children = self.get_all_children()
for d in children:
d._validate_selects()
d._validate_constants()
d._validate_length()
d._extract_images_from_text_editor()
d._sanitize_content()
d._save_passwords()
if self.is_new():
# don't set fields like _assign, _comments for new doc
for fieldname in optional_fields:
self.set(fieldname, None)
def apply_fieldlevel_read_permissions(self):
'''Remove values the user is not allowed to read (called when loading in desk)'''
has_higher_permlevel = False
for p in self.get_permissions():
if p.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
if self.flags.ignore_permissions or frappe.flags.in_install:
return
has_access_to = self.get_permlevel_access()
high_permlevel_fields = self.meta.get_high_permlevel_fields()
if high_permlevel_fields:
self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
# check for child tables
for df in self.meta.get_table_fields():
high_permlevel_fields = frappe.get_meta(df.options).meta.get_high_permlevel_fields()
if high_permlevel_fields:
for d in self.get(df.fieldname):
d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
user_roles = frappe.get_roles()
self._has_access_to = []
for perm in self.get_permissions():
if perm.role in user_roles and perm.permlevel > 0 and perm.get(permission_type):
if perm.permlevel not in self._has_access_to:
self._has_access_to.append(perm.permlevel)
return self._has_access_to
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access()
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
"""Checks if `modified` timestamp provided by document being updated is same as the
`modified` timestamp in the database. If there is a different, the document has been
updated in the database after the current copy was read. Will throw an error if
timestamps don't match.
Will also validate document transitions (Save > Submit > Cancel) calling
`self.check_docstatus_transition`."""
conflict = False
self._action = "save"
if not self.get('__islocal'):
if self.meta.issingle:
modified = frappe.db.sql('''select value from tabSingles
where doctype=%s and field='modified' for update''', self.doctype)
modified = modified and modified[0][0]
if modified and modified != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
where name = %s for update""".format(self.doctype), self.name, as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
else:
tmp = tmp[0]
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
"""Ensures valid `docstatus` transition.
Valid transitions are (number in brackets is `docstatus`):
- Save (0) > Save (0)
- Save (0) > Submit (1)
- Submit (1) > Submit (1)
- Submit (1) > Cancel (2)
"""
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
self.check_permission("submit")
else:
raise frappe.DocstatusTransitionError, _("Cannot change docstatus from 0 to 2")
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
self.check_permission("submit")
elif self.docstatus==2:
self._action = "cancel"
self.check_permission("cancel")
else:
raise frappe.DocstatusTransitionError, _("Cannot change docstatus from 1 to 0")
elif docstatus==2:
raise frappe.ValidationError, _("Cannot edit cancelled document")
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def validate_update_after_submit(self):
if self.flags.ignore_validate_update_after_submit:
return
self._validate_update_after_submit()
for d in self.get_all_children():
if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
# in case of a new row, don't validate allow on submit, if table is allow on submit
continue
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print self.as_json().encode("utf-8")
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
if self.flags.ignore_links:
return
invalid_links, cancelled_links = self.get_invalid_links()
for d in self.get_all_children():
result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
invalid_links.extend(result[0])
cancelled_links.extend(result[1])
if invalid_links:
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
if cancelled_links:
msg = ", ".join((each[2] for each in cancelled_links))
frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": "Table"}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in hooks"""
if "flags" in kwargs:
del kwargs["flags"]
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = method.encode("utf-8")
out = Document.hook(fn)(self, *args, **kwargs)
self.run_email_alerts(method)
return out
def run_trigger(self, method, *args, **kwargs):
return self.run_method(method, *args, **kwargs)
def run_email_alerts(self, method):
'''Run email alerts for this method'''
if frappe.flags.in_import or frappe.flags.in_patch or frappe.flags.in_install:
return
if self.flags.email_alerts_executed==None:
self.flags.email_alerts_executed = []
from frappe.email.doctype.email_alert.email_alert import evaluate_alert
if self.flags.email_alerts == None:
alerts = frappe.cache().hget('email_alerts', self.doctype)
if alerts==None:
alerts = frappe.get_all('Email Alert', fields=['name', 'event', 'method'],
filters={'enabled': 1, 'document_type': self.doctype})
frappe.cache().hset('email_alerts', self.doctype, alerts)
self.flags.email_alerts = alerts
if not self.flags.email_alerts:
return
def _evaluate_alert(alert):
if not alert.name in self.flags.email_alerts_executed:
evaluate_alert(self, alert.name, alert.event)
event_map = {
"on_update": "Save",
"after_insert": "New",
"on_submit": "Submit",
"on_cancel": "Cancel"
}
if not self.flags.in_insert:
# value change is not applicable in insert
event_map['validate'] = 'Value Change'
for alert in self.flags.email_alerts:
event = event_map.get(method, None)
if event and alert.event == event:
_evaluate_alert(alert)
elif alert.event=='Method' and method == alert.method:
_evaluate_alert(alert)
@staticmethod
def whitelist(f):
f.whitelisted = True
return f
@whitelist.__func__
def _submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self.docstatus = 1
self.save()
@whitelist.__func__
def _cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self.docstatus = 2
self.save()
@whitelist.__func__
def submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self._submit()
@whitelist.__func__
def cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self._cancel()
def delete(self):
"""Delete document."""
frappe.delete_doc(self.doctype, self.name, flags=self.flags)
def run_before_save_methods(self):
"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
- `validate`, `before_save` for **Save**.
- `validate`, `before_submit` for **Submit**.
- `before_cancel` for **Cancel**
- `before_update_after_submit` for **Update after Submit**
Will also update title_field if set"""
self.set_title_field()
self.reset_seen()
if self.flags.ignore_validate:
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
def run_post_save_methods(self):
"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
- `on_update` for **Save**.
- `on_update`, `on_submit` for **Submit**.
- `on_cancel` for **Cancel**
- `update_after_submit` for **Update after Submit**"""
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
if not self.flags.ignore_submit_comment:
self.add_comment("Submitted")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
if not self.flags.ignore_submit_comment:
self.add_comment("Cancelled")
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
self.run_method('on_change')
self.update_timeline_doc()
self.clear_cache()
self.notify_update()
if (self.doctype, self.name) in frappe.flags.currently_saving:
frappe.flags.currently_saving.remove((self.doctype, self.name))
self.latest = None
def clear_cache(self):
frappe.cache().hdel("last_modified", self.doctype)
def reset_seen(self):
'''Clear _seen property and set current user as seen'''
if getattr(self.meta, 'track_seen', False):
self._seen = json.dumps([frappe.session.user])
def notify_update(self):
"""Publish realtime that the current document is modified"""
frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
doctype=self.doctype, docname=self.name, after_commit=True)
if not self.meta.get("read_only") and not self.meta.get("issingle") and \
not self.meta.get("istable"):
frappe.publish_realtime("list_update", {"doctype": self.doctype}, after_commit=True)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
@staticmethod
def whitelist(f):
"""Decorator: Whitelist method to be called remotely via REST API."""
f.whitelisted = True
return f
@staticmethod
def hook(f):
"""Decorator: Make method `hookable` (i.e. extensible by another app).
Note: If the hooked methods return values (dicts), all returned values are
collated into one dict and returned. Ideally, don't return values in hookable
methods, set properties in the document."""
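# Hedged illustration (not part of the original source) of how another app would
# typically extend a hooked method via doc_events in its hooks.py. The app name
# "my_app", the DocType "Sales Order" and the handler path are made-up examples.
#
#   # hooks.py of the extending app
#   doc_events = {
#       "Sales Order": {
#           "validate": "my_app.events.validate_sales_order"
#       }
#   }
#
#   # my_app/events.py
#   def validate_sales_order(doc, method):
#       # called after the controller's own validate(); avoid returning values here
#       pass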
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_doc_hooks()
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
def is_whitelisted(self, method):
fn = getattr(self, method, None)
if not fn:
raise NotFound("Method {0} not found".format(method))
elif not getattr(fn, "whitelisted", False):
raise Forbidden("Method {0} not whitelisted".format(method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""Check that value of fieldname should be 'condition' val2
else throw Exception."""
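# Hedged usage sketch (not part of the original source); the fieldnames and
# values below are made-up examples:
#   self.validate_value("status", "in", ["Open", "Closed"])
#   self.validate_value("qty", ">", 0, raise_exception=frappe.ValidationError)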
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
val1 = doc.get_value(fieldname)
df = doc.meta.get_field(fieldname)
val2 = doc.cast(val2, df)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}".format(doc.idx, label, condition_str, val2))
else:
msg = _("Incorrect value: {0} must be {1} {2}".format(label, condition_str, val2))
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
"""Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
:param doc: Document whose numeric properties are to be rounded.
:param fieldnames: [Optional] List of fields to be rounded."""
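# Hedged usage sketch (not part of the original source); "items", "rate" and
# "amount" are made-up fieldnames:
#   self.round_floats_in(self, ["rate", "amount"])
#   for item in self.get("items") or []:
#       self.round_floats_in(item)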
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def get_url(self):
"""Returns Desk URL for this document. `/desk#Form/{doctype}/{name}`"""
return "/desk#Form/{doctype}/{name}".format(doctype=self.doctype, name=self.name)
def add_comment(self, comment_type, text=None, comment_by=None, link_doctype=None, link_name=None):
"""Add a comment to this document.
:param comment_type: e.g. `Comment`. See Communication for more info."""
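# Hedged usage sketch (not part of the original source); the comment text is a
# made-up example:
#   doc.add_comment("Comment", text="Approved by accounts")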
comment = frappe.get_doc({
"doctype":"Communication",
"communication_type": "Comment",
"sender": comment_by or frappe.session.user,
"comment_type": comment_type,
"reference_doctype": self.doctype,
"reference_name": self.name,
"content": text or comment_type,
"link_doctype": link_doctype,
"link_name": link_name
}).insert(ignore_permissions=True)
return comment
def add_seen(self, user=None):
'''add the given/current user to list of users who have seen this document (_seen)'''
if not user:
user = frappe.session.user
if self.meta.track_seen:
if self._seen:
_seen = json.loads(self._seen)
else:
_seen = []
if user not in _seen:
_seen.append(user)
self.db_set('_seen', json.dumps(_seen), update_modified=False)
frappe.local.flags.commit = True
def get_signature(self):
"""Returns signature (hash) for private URL."""
return hashlib.sha224(get_datetime_str(self.creation)).hexdigest()
def get_liked_by(self):
liked_by = getattr(self, "_liked_by", None)
if liked_by:
return json.loads(liked_by)
else:
return []
def set_onload(self, key, value):
if not self.get("__onload"):
self.set("__onload", frappe._dict())
self.get("__onload")[key] = value
def update_timeline_doc(self):
if frappe.flags.in_install or not self.meta.get("timeline_field"):
return
timeline_doctype = self.meta.get_link_doctype(self.meta.timeline_field)
timeline_name = self.get(self.meta.timeline_field)
if not (timeline_doctype and timeline_name):
return
# update timeline doc in communication if it is different than current timeline doc
frappe.db.sql("""update `tabCommunication`
set timeline_doctype=%(timeline_doctype)s, timeline_name=%(timeline_name)s
where
reference_doctype=%(doctype)s and reference_name=%(name)s
and (timeline_doctype is null or timeline_doctype != %(timeline_doctype)s
or timeline_name is null or timeline_name != %(timeline_name)s)""",
{
"doctype": self.doctype,
"name": self.name,
"timeline_doctype": timeline_doctype,
"timeline_name": timeline_name
})
def queue_action(self, action, **kwargs):
'''Run an action in background. If the action has an inner function,
like _submit for submit, it will call that instead'''
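# Hedged usage sketch (not part of the original source): queue a submit to run in
# a background worker instead of submitting inline.
#   doc.queue_action('submit')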
# call _submit instead of submit, so you can override submit to call
# run_delayed based on some action
# See: Stock Reconciliation
if hasattr(self, '_' + action):
action = '_' + action
if file_lock.lock_exists(self.get_signature()):
frappe.throw(_('This document is currently queued for execution. Please try again'),
title=_('Document Queued'), indicator='red')
self.lock()
enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
action=action, **kwargs)
def lock(self, timeout=None):
'''Creates a lock file for the given document. If timeout is set,
it will retry once per second, for up to timeout seconds, to acquire the lock.
:param timeout: Timeout in seconds, default 0'''
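# Hedged usage sketch (not part of the original source): wait up to 10 seconds for
# an existing lock before frappe.DocumentLockedError is raised.
#   doc.lock(timeout=10)
#   try:
#       pass  # long-running update
#   finally:
#       doc.unlock()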
signature = self.get_signature()
if file_lock.lock_exists(signature):
lock_exists = True
if timeout:
for i in range(timeout):
time.sleep(1)
if not file_lock.lock_exists(signature):
lock_exists = False
break
if lock_exists:
raise frappe.DocumentLockedError
file_lock.create_lock(signature)
def unlock(self):
'''Delete the lock file for this document'''
file_lock.delete_lock(self.get_signature())
def execute_action(doctype, name, action, **kwargs):
'''Execute an action on a document (called by background worker)'''
doc = frappe.get_doc(doctype, name)
doc.unlock()
try:
getattr(doc, action)(**kwargs)
except Exception:
frappe.db.rollback()
# add a comment (?)
if frappe.local.message_log:
msg = json.loads(frappe.local.message_log[-1]).get('message')
else:
msg = '<pre><code>' + frappe.get_traceback() + '</code></pre>'
doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
doc.notify_update()
|
py | 7dfb8871d137c9cc64d6c12ce1d90b1d476692e4 | # Templating utils
from util.templating import *
# Error documents
from error import *
# Page view functions
from main import *
from paste import *
from user import *
from misc import *
# API view functions
from api.authentication import *
from api.paste import *
from api.user import *
|