ext (9 classes) | sha (length 40) | content (3-1.04M chars)
---|---|---
py | b40dbab0b41a080f68721481ff93b8cef4cc88ab | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
Tests for L{twisted.trial.util}
"""
from twisted.trial.unittest import TestCase
from unittest import skipIf
@skipIf(True, "Skip all tests when @skipIf is used on a class")
class SkipDecoratorUsedOnClass(TestCase):
"""
All tests should be skipped because @skipIf decorator is used on
this class.
"""
def test_shouldNeverRun_1(self):
raise Exception("Test should skip and never reach here")
def test_shouldNeverRun_2(self):
raise Exception("Test should skip and never reach here")
@skipIf(True, "")
class SkipDecoratorUsedOnClassWithEmptyReason(TestCase):
"""
All tests should be skipped because @skipIf decorator is used on
this class, even if the reason is an empty string
"""
def test_shouldNeverRun_1(self):
raise Exception("Test should skip and never reach here")
def test_shouldNeverRun_2(self):
raise Exception("Test should skip and never reach here")
class SkipDecoratorUsedOnMethods(TestCase):
"""
Only methods where @skipIf decorator is used should be skipped.
"""
@skipIf(True, "skipIf decorator used so skip test")
def test_shouldNeverRun(self):
raise Exception("Test should skip and never reach here")
@skipIf(True, "")
def test_shouldNeverRunWithEmptyReason(self):
raise Exception("Test should skip and never reach here")
def test_shouldShouldRun(self):
self.assertTrue(True, "Test should run and not be skipped")
@skipIf(False, "should not skip")
def test_shouldShouldRunWithSkipIfFalse(self):
self.assertTrue(True, "Test should run and not be skipped")
@skipIf(False, "")
def test_shouldShouldRunWithSkipIfFalseEmptyReason(self):
self.assertTrue(True, "Test should run and not be skipped")
class SkipAttributeOnClass(TestCase):
"""
All tests should be skipped because skip attribute is set on
this class.
"""
skip = "'skip' attribute set on this class, so skip all tests"
def test_one(self):
raise Exception("Test should skip and never reach here")
def test_two(self):
raise Exception("Test should skip and never reach here")
class SkipAttributeOnMethods(TestCase):
"""
Only methods that have the skip attribute set should be skipped.
"""
def test_one(self):
raise Exception("Should never reach here")
test_one.skip = "skip test, skip attribute set on method" # type: ignore[attr-defined]
def test_shouldNotSkip(self):
self.assertTrue(True, "Test should run and not be skipped")
|
py | b40dbb3fe1c4a1cf3230ea820795aec2e781ec54 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple agent-environment training loop."""
from bsuite.baselines import base
from bsuite.logging import terminal_logging
import dm_env
import numpy as np
import wandb
def run(agent: base.Agent,
train_environment: dm_env.Environment,
test_environment: dm_env.Environment,
num_episodes: int,
verbose: bool = False) -> None:
"""Runs an agent on an environment.
Note that for bsuite environments, logging is handled internally.
Args:
agent: The agent to train and evaluate.
train_environment: The environment to train on.
test_environment: The environment to evaluate on.
num_episodes: Number of episodes to train for.
verbose: Whether to also log to terminal.
"""
if verbose:
test_environment = terminal_logging.wrap_environment(
test_environment, log_every=True) # pytype: disable=wrong-arg-types
train_scores, test_scores = [], []
for i_episode in range(num_episodes):
# Run an episode.
score = 0
ep_var, ep_weights, eff_bs_list, eps_list = [], [], [], []
timestep = train_environment.reset()
while not timestep.last():
# Generate an action from the agent's policy.
action = agent.select_action(timestep)
# Step the environment.
new_timestep = train_environment.step(action)
# Tell the agent about what just happened.
logs = agent.update(timestep, action, new_timestep)
if len(logs) > 0:
ep_var.extend(logs[0])
ep_weights.extend(logs[1])
eff_bs_list.append(logs[2])
eps_list.append(logs[3])
# Book-keeping.
timestep = new_timestep
score += timestep.reward
train_scores.append(score)
if i_episode % 1 == 0:  # evaluate after every training episode (the modulus controls evaluation frequency)
test_score = test(agent, test_environment)
test_scores.append(test_score)
# wandb.log({"Test Return": test_score, "Test Return / 100 episodes": np.mean(test_scores[-100:])}, commit=False)
# if len(ep_var) > 0:
# agent.train_log(ep_var, ep_weights, eff_bs_list, eps_list)
# wandb.log({"Train Return": score, "Train Return / 100 episodes": np.mean(train_scores[-100:])})
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(train_scores[-100:])), end="")
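# A hedged usage sketch for `run` (not part of this module; the bsuite id, the dqn baseline
# and the results directory are assumptions). Note that this loop expects more than the stock
# bsuite base.Agent interface: the agent must also provide select_action_test() for test()
# below, and its update() must return the logs unpacked above.
#
#   import bsuite
#   from bsuite.baselines.tf import dqn
#
#   train_env = bsuite.load_and_record('catch/0', save_path='/tmp/bsuite')
#   test_env = bsuite.load_from_id('catch/0')
#   agent = dqn.default_agent(train_env.observation_spec(), train_env.action_spec())
#   run(agent, train_env, test_env, num_episodes=1000, verbose=True)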
def test(agent, environment):
score = 0
timestep = environment.reset()
while not timestep.last():
action = agent.select_action_test(timestep)
new_timestep = environment.step(action)
timestep = new_timestep
score += timestep.reward
return score
|
py | b40dbb9d4da9339396da34fe6417c535e8cc01cc | #
# Metrix++, Copyright 2009-2019, Metrix++ Project
# Link: https://github.com/metrixplusplus/metrixplusplus
#
# This file is a part of Metrix++ Tool.
#
import mpp.api
import re
class Plugin(mpp.api.Plugin,
mpp.api.IConfigurable,
mpp.api.Child,
mpp.api.MetricPluginMixin):
def declare_configuration(self, parser):
parser.add_option("--std.code.member.fields", "--scmf",
action="store_true", default=False,
help="Enables collection of number of data members / fields "
"per classes, structs and interfaces [default: %default]")
parser.add_option("--std.code.member.globals", "--scmg",
action="store_true", default=False,
help="Enables collection of number of global variables / fields "
"per global regions and namespaces [default: %default]")
parser.add_option("--std.code.member.classes", "--scmc",
action="store_true", default=False,
help="Enables collection of number of classes defined "
"per any region [default: %default]")
parser.add_option("--std.code.member.structs", "--scms",
action="store_true", default=False,
help="Enables collection of number of structs defined "
"per any region [default: %default]")
parser.add_option("--std.code.member.interfaces", "--scmi",
action="store_true", default=False,
help="Enables collection of number of interfaces defined "
"per any region [default: %default]")
parser.add_option("--std.code.member.types", "--scmt",
action="store_true", default=False,
help="Enables collection of number of types (classes, structs "
"or interface) defined per any region [default: %default]")
parser.add_option("--std.code.member.methods", "--scmm",
action="store_true", default=False,
help="Enables collection of number of methods (functions) defined "
"per any region [default: %default]")
parser.add_option("--std.code.member.namespaces", "--scmnss",
action="store_true", default=False,
help="Enables collection of number of namespaces defined "
"globally and enclosed (sub-namespaces) [default: %default]")
def configure(self, options):
self.is_active_fields = options.__dict__['std.code.member.fields']
self.is_active_globals = options.__dict__['std.code.member.globals']
self.is_active_classes = options.__dict__['std.code.member.classes']
self.is_active_structs = options.__dict__['std.code.member.structs']
self.is_active_interfaces = options.__dict__['std.code.member.interfaces']
self.is_active_types = options.__dict__['std.code.member.types']
self.is_active_methods = options.__dict__['std.code.member.methods']
self.is_active_namespaces = options.__dict__['std.code.member.namespaces']
def initialize(self):
# counts fields and properties with default getter/setter
pattern_to_search_cs = re.compile(
r'''([_a-zA-Z][_a-zA-Z0-9]*\s+[_a-zA-Z][_a-zA-Z0-9])\s*([=;]|'''
r'''[{]\s*(public\s+|private\s+|protected\s+|internal\s+)?(get|set)\s*[;]\s*[a-z \t\r\n]*[}])''')
pattern_to_search_cpp = re.compile(
r'''([_a-zA-Z][_a-zA-Z0-9]*\s+[_a-zA-Z][_a-zA-Z0-9])\s*[=;]''')
pattern_to_search_java = re.compile(
r'''([_$a-zA-Z][_$a-zA-Z0-9]*\s+[_$a-zA-Z][_$a-zA-Z0-9])\s*[=;]''')
self.declare_metric(self.is_active_fields,
self.Field('fields', int, non_zero=True),
{
'std.code.java': pattern_to_search_java,
'std.code.cpp': pattern_to_search_cpp,
'std.code.cs': pattern_to_search_cs,
},
marker_type_mask=mpp.api.Marker.T.CODE,
region_type_mask=mpp.api.Region.T.CLASS |
mpp.api.Region.T.STRUCT | mpp.api.Region.T.INTERFACE)
self.declare_metric(self.is_active_globals,
self.Field('globals', int, non_zero=True),
{
'std.code.java': pattern_to_search_java,
'std.code.cpp': pattern_to_search_cpp,
'std.code.cs': pattern_to_search_cs,
},
marker_type_mask=mpp.api.Marker.T.CODE,
region_type_mask=mpp.api.Region.T.GLOBAL |
mpp.api.Region.T.NAMESPACE)
self.declare_metric(self.is_active_classes,
self.Field('classes', int, non_zero=True),
(None, self.ClassesCounter),
exclude_subregions=False,
merge_markers=True)
self.declare_metric(self.is_active_structs,
self.Field('structs', int, non_zero=True),
(None, self.StructCounter),
exclude_subregions=False,
merge_markers=True)
self.declare_metric(self.is_active_interfaces,
self.Field('interfaces', int, non_zero=True),
(None, self.InterfaceCounter),
exclude_subregions=False,
merge_markers=True)
self.declare_metric(self.is_active_types,
self.Field('types', int, non_zero=True),
(None, self.TypeCounter),
exclude_subregions=False,
merge_markers=True)
self.declare_metric(self.is_active_methods,
self.Field('methods', int, non_zero=True),
(None, self.MethodCounter),
exclude_subregions=False,
merge_markers=True)
self.declare_metric(self.is_active_namespaces,
self.Field('namespaces', int, non_zero=True),
(None, self.NamespaceCounter),
exclude_subregions=False,
merge_markers=True)
super(Plugin, self).initialize(fields=self.get_fields())
if self.is_active():
self.subscribe_by_parents_interface(mpp.api.ICode)
class ClassesCounter(mpp.api.MetricPluginMixin.PlainCounter):
def count(self, marker, pattern_to_search):
self.result = sum(1 for unused in self.data.iterate_regions(
filter_group=mpp.api.Region.T.CLASS, region_id=self.region.get_id()))
class StructCounter(mpp.api.MetricPluginMixin.PlainCounter):
def count(self, marker, pattern_to_search):
self.result = sum(1 for unused in self.data.iterate_regions(
filter_group=mpp.api.Region.T.STRUCT, region_id=self.region.get_id()))
class InterfaceCounter(mpp.api.MetricPluginMixin.PlainCounter):
def count(self, marker, pattern_to_search):
self.result = sum(1 for unused in self.data.iterate_regions(
filter_group=mpp.api.Region.T.INTERFACE, region_id=self.region.get_id()))
class TypeCounter(mpp.api.MetricPluginMixin.PlainCounter):
def count(self, marker, pattern_to_search):
self.result = sum(1 for unused in self.data.iterate_regions(
filter_group=mpp.api.Region.T.CLASS | mpp.api.Region.T.STRUCT |
mpp.api.Region.T.INTERFACE, region_id=self.region.get_id()))
class MethodCounter(mpp.api.MetricPluginMixin.PlainCounter):
def count(self, marker, pattern_to_search):
self.result = sum(1 for unused in self.data.iterate_regions(
filter_group=mpp.api.Region.T.FUNCTION, region_id=self.region.get_id()))
class NamespaceCounter(mpp.api.MetricPluginMixin.PlainCounter):
def count(self, marker, pattern_to_search):
self.result = sum(1 for unused in self.data.iterate_regions(
filter_group=mpp.api.Region.T.NAMESPACE, region_id=self.region.get_id()))
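# A hedged usage sketch: the option names come from declare_configuration above, while the
# metrix++ command-line form itself is an assumption about how this plugin is normally driven.
#
#   metrix++ collect --std.code.member.fields --std.code.member.methods -- ./src
#   metrix++ view -- ./src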
|
py | b40dbc1d9bb6f8bc376aa2eda5b3cade7f06cda5 | import logging
import numpy as np
import cv2
from ml_serving.utils.helpers import load_image
LOG = logging.getLogger(__name__)
def init_hook(**params):
LOG.info('Loaded.')
def process(inputs, ct_x, **kwargs):
original_image, is_video = load_image(inputs, 'inputs')
if original_image is None:
raise RuntimeError('Missing "inputs" key in inputs. Provide an image in "inputs" key')
if original_image.shape[2] > 3:
original_image = original_image[:, :, 0:3]
def _return(result):
encoding = ''
if not is_video:
if result.shape[2] == 3:
result = result[:, :, ::-1]
result = cv2.imencode('.jpg', result)[1].tostring()
encoding = 'jpeg'
else:
result = cv2.imencode('.png', result)[1].tostring()
encoding = 'png'
return {'output': result, 'encoding': encoding}
ratio = 1.0
w = float(original_image.shape[1])
h = float(original_image.shape[0])
if w > h:
if w > 1024:
ratio = w / 1024.0
else:
if h > 1024:
ratio = h / 1024.0
if ratio > 1:
image = cv2.resize(original_image, (int(w / ratio), int(h / ratio)))
else:
image = original_image
serv_image = cv2.resize(image, (160, 160)).astype(np.float32) / 255.0
result = ct_x.drivers[0].predict({'image': np.expand_dims(serv_image, axis=0)})
mask = result['output'][0]
mask[mask < 0.5] = 0
if mask.shape != image.shape:
mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
mask = cv2.GaussianBlur(mask, (21, 21), 11)
if len(mask.shape) == 2:
mask = np.expand_dims(mask, axis=2)
if not is_video:
mask = (mask * 255).astype(np.uint8)
image = image[:, :, ::-1].astype(np.uint8)
image = np.concatenate([image, mask], axis=2)
else:
image = image.astype(np.float32) * mask
image = image.astype(np.uint8)
return _return(image)
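# A rough usage sketch inferred from the handlers above. The serving context object is an
# assumption, and load_image() may expect encoded bytes rather than a decoded array; only the
# 'inputs', 'image' and 'output' keys are taken from this code.
#
#   with open('person.jpg', 'rb') as f:
#       response = process({'inputs': f.read()}, ct_x=serving_context)
#   # response == {'output': <JPEG or PNG bytes>, 'encoding': 'jpeg' or 'png'}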
|
py | b40dbc59b44c1f4c881c548a5587fe3262d54079 | import time
import board
import terminalio
import displayio
import adafruit_sgp30
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text import label
import adafruit_imageload
from adafruit_clue import clue
# --| User Config |-------------------------
TVOC_LEVELS = (80, 120) # set two TVOC levels
MESSAGES = ("GOOD", "SUS?", "BAD!") # set three messages (4 char max)
# ------------------------------------------
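# With the defaults above, the loop at the bottom maps readings to messages (and mouth tiles) as:
#   TVOC <= 80        -> MESSAGES[0] ("GOOD")
#   80 < TVOC <= 120  -> MESSAGES[1] ("SUS?")
#   TVOC > 120        -> MESSAGES[2] ("BAD!")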
# setup UI
cow_bmp, cow_pal = adafruit_imageload.load("bmps/milk_bg.bmp")
background = displayio.TileGrid(cow_bmp, pixel_shader=cow_pal)
mouth_bmp, mouth_pal = adafruit_imageload.load("bmps/mouth_sheet.bmp")
mouth = displayio.TileGrid(
mouth_bmp,
pixel_shader=mouth_pal,
tile_width=40,
tile_height=20,
width=1,
height=1,
x=35,
y=110,
)
msg_font = bitmap_font.load_font("fonts/Alphakind_28.bdf")
msg_font.load_glyphs("".join(MESSAGES))
message = label.Label(msg_font, text="WAIT", color=0x000000)
message.anchor_point = (0.5, 0.5)
message.anchored_position = (172, 38)
data_font = bitmap_font.load_font("fonts/F25_Bank_Printer_Bold_12.bdf")
data_font.load_glyphs("eTVOC=12345?")
tvoc = label.Label(data_font, text="TVOC=?????", color=0x000000)
tvoc.anchor_point = (0, 1)
tvoc.anchored_position = (5, 235)
eco2 = label.Label(data_font, text="eCO2=?????", color=0x000000)
eco2.anchor_point = (0, 1)
eco2.anchored_position = (130, 235)
splash = displayio.Group(max_size=5)
splash.append(background)
splash.append(mouth)
splash.append(message)
splash.append(tvoc)
splash.append(eco2)
clue.display.show(splash)
# setup SGP30 and wait for initial warm up
sgp30 = adafruit_sgp30.Adafruit_SGP30(board.I2C())
time.sleep(15)
# loop forever
while True:
eCO2, TVOC = sgp30.iaq_measure()
tvoc.text = "TVOC={:5d}".format(TVOC)
eco2.text = "eCO2={:5d}".format(eCO2)
level = 0
for thresh in TVOC_LEVELS:
if TVOC <= thresh:
break
level += 1
if level <= len(TVOC_LEVELS):
message.text = MESSAGES[level]
mouth[0] = level
else:
message.text = "????"
time.sleep(1)
|
py | b40dbddd26abe02008da4cd37ef97c3ad410030b | from flask import Flask
from flask import request as rq
import token_srm
import attendence_marks
import timetable
import course_personal_details
import json
from flask import Response
app = Flask(__name__)
@app.route('/')
def home():
json_o = {"status": "success", "msg": "Ready to Push Data from academia server"}
json_o = json.dumps(json_o)
return json_o
@app.route('/token', methods=['GET', 'POST'])
def request():
if 'email' in rq.args and 'pass' in rq.args:
response = token_srm.getToken(rq.args.get('email'), rq.args.get('pass'))
response = Response(response, status=200, mimetype='application/json')
return response
else:
response = {"status":"error", "msg":"Error in Input Parameters"}
response = json.dumps(response)
response = Response(response, status=200, mimetype='application/json')
return response
@app.route('/AttAndMarks', methods=['GET', 'POST'])
def AttAndMarks():
if 'token' in rq.args:
token = str(rq.args.get('token'))
att_marks = attendence_marks.getAttendenceAndMarks(token)
response = Response(att_marks, status=200, mimetype='application/json')
return response
else:
response = {"status": "error", "msg": "Error in Input Parameters"}
response = json.dumps(response)
response = Response(response, status=200, mimetype='application/json')
return response
@app.route('/TimeTable', methods=['GET', 'POST'])
def TimeTable():
if 'batch' in rq.args and 'token' in rq.args:
batchNo = rq.args.get('batch')
token = rq.args.get('token')
timeTable = timetable.getTimeTable(token, batchNo)
response = Response(timeTable, status=200, mimetype='application/json')
return response
else:
response = {"status": "error", "msg": "Error in Input Parameters"}
response = json.dumps(response)
response = Response(response, status=200, mimetype='application/json')
return response
@app.route('/PersonalDetails', methods=['GET', 'POST'])
def getPersonalDetails():
if 'token' in rq.args:
token = rq.args.get('token')
details = course_personal_details.getCoursePersonalDetails(token)
response = Response(details, status=200, mimetype='application/json')
return response
else:
response = {"status": "error", "msg": "Error in Input Parameters"}
response = json.dumps(response)
response = Response(response, status=200, mimetype='application/json')
return response
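# Example requests against the routes above (host and port assumed from the dev server below):
#
#   GET /token?email=<academia email>&pass=<password>
#   GET /AttAndMarks?token=<token returned by /token>
#   GET /TimeTable?token=<token>&batch=<batch number>
#   GET /PersonalDetails?token=<token>
#
# Every endpoint answers with JSON; if a required query parameter is missing, it returns
# {"status": "error", "msg": "Error in Input Parameters"} (still with HTTP status 200).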
if __name__ == '__main__':
from os import environ
app.run(debug=False, host='0.0.0.0', port=int(environ.get("PORT", 5000)))  # read the port from the PORT env var, falling back to 5000
|
py | b40dbdebaf31966d5566424f30b6460dd408c6b9 | import tkinter as tk
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def curva_de_ejemplo():
"""
Example curve that displays a parametric curve in a new window
Members:
- Omar Olivares Urrutia (@ofou)
:return: plot curve
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Prepare arrays x, y, z
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
ax.plot(x, y, z, label='Curva Paramétrica de Ejemplo')
ax.legend()
fig.show()
# Add the matplotlib plot
# fig = Figure(figsize=(5, 4), dpi=100)
# t = np.arange(0, 3, .01)
# fig.add_subplot(111).plot(t, 2 * np.sin(2 * np.pi * t))
#
# canvas = FigureCanvasTkAgg(fig, master=root) # A tk.DrawingArea.
# canvas.draw()
# canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
#
# toolbar = NavigationToolbar2Tk(canvas, root)
# toolbar.update()
# canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
# canvas.mpl_connect("key_press_event", on_key_press)
# If you put root.destroy() here, it will cause an error if the window is
# closed with the window manager.
def helice_conica():
# add your code here
"""
Example curve that displays a conical helix
Members:
- Mario Labbé (@LsMario1998)
- Mario González (@tatameister)
- Cristóbal Cortés (@Cristobal140)
- Thadly Guerra (@Thadly64)
- Luis Inostroza (@luisinostrozaf)
:return: conical helix curve
"""
plt.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
# Prepare arrays x, y, z
theta = np.linspace(-6 * np.pi, 6 * np.pi, 1000)
print (np.cos((np.pi*30)/180))
e = 2.718281
a = 3
x = a * (e**(np.sin(45) * (1/np.tan(30)*theta))) * np.cos(theta)
y = a * (e**(np.sin(45) * (1/np.tan(30)*theta)))* np.sin(theta)
z = a * (e**(np.sin(45) * (1/np.tan(30)*theta))) * (1/np.tan(45))
ax.plot(x, y, z, label='helice cónica')
ax.legend()
plt.show()
pass
def helice_circular_1():
"""
Curve that displays a circular helix in a new window
Members:
- Felipe Lopez Vergara (@felipelopez00)
- Bastian Bustamante Moraga (@BastianBustamante)
- Rodrigo Torrez Queupan (@imperium31)
- Juan Hernandez Gatica (@juanpablo1994)
-Eric Rojas Palma (@valukar)
:return: circular helix
"""
# add your code here
n = 1000
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# parameters of the circular helix
t_max = 8 * np.pi
t = np.linspace(0, t_max, n)
z = t
r = 5
y = r * np.sin(t)
x = r * np.cos(t)
ax.plot(x, y, z, 'b', lw=2, label='Curva Hélice Circular')
ax.legend()
# red line at the center of the circular helix
ax.plot((0, 0), (0, 0), (-t_max * 0.2, t_max * 1.2), color='r', lw=2)
plt.show()
pass
def Corona_Sinusoidal():
''' GROUP MEMBERS:
_Luis Soto Zelada (@Luiss23)
_Diego Rojas (@diegoskky)
_Lucia Vilches (@luciavj)
plots a sinusoidal crown on a Cartesian plane
of the form f(x) = 2*sin(pi * x)'''
Fs: int = 80  # corresponds to the limit of the function over one cycle
f: float = 1  # number of units on the y axis
sample: int = 80
x = np.arange(sample)
y = np.sin(2 * np.pi * f * x / Fs)
plt.plot(x, y)
plt.show()
pass
def curva_de_viviani():
"""
Function that displays a Viviani curve in a new window
Members:
Levi Urbina
Natalia Valenzuela
Ricardo Vergara
Estefany Alarcon
return: curva_de_viviani
"""
a = 1
t = np.linspace(-4, 4 * np.pi, 100)
x = a * (1 + np.cos(t))
y = a * np.sin(t)
z = 2 * a * np.sin(t / 2)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_title("Curva de viviani")
ax.plot(x, y, z, label="Curva de Viviani", lw=5)
plt.show()
pass
def hipopoda_1():
# add your code here
'''
Members:
- Boris Gutiérrez Cornejo (@BorisAndresLmb)
- Juan González Jélvez (@JuanGonzalez33)
- Pablo Barrera Whiteley (@Pablobw)
- José Flores Cáceres (@JoseFlores9)
- Cristobal Rojas Saavedra (@cristotix)
Function hipopoda_1: plots the hippopede
Uses the parametric form of the function
x = a + (r-a)*cos(t)
y = (r-a)*sin(t)
z = 2*sqrt(a*(r-a))*sin(t/2)
Parameters:
a = distance from the center of the sphere to the axis of the cylinder
r = radius of the sphere
:return: plot curve (hippopede)
'''
plt.rcParams['legend.fontsize'] = 12
fig = plt.figure()
ax = fig.gca(projection='3d')
# Prepare arrays x, y, z
theta = np.linspace(-4 * np.pi, 4 * np.pi, 99)
a = 1
r = 5
x = a + (r - a) * np.cos(theta)
y = (r - a) * np.sin(theta)
z = 2 * (a * (r - a)) ** (1 / 2) * np.sin(theta / 2)
ax.plot(x, y, z, label='Hipopede de Eudoxo')
ax.legend()
plt.show()
pass
def conica_de_papus():
"""
Curve that displays a conical spiral of Pappus in the graphical interface
Members:
- José Fabián Ignacio González Encina (@GoldenFenix)
- Cristian Eduardo Castillo (@criseduardjjd)
- Diego Faundez Mendez(@diegofaundezm)
- Claudio Alcaino Muñoz (@klauser99)
- Francisco Castillo Moraga(@taifokk)
:return: conical spiral of Pappus
"""
plt.rcParams['legend.fontsize'] = 12
fig = plt.figure()
ax = fig.gca(projection='3d')
# Prepare arrays x, y, z
t = np.linspace(-9 * np.pi, 9 * np.pi, 2000)
a1 = 30
a = 15
z = a1 * np.cos(a) * t
r = z ** 2 + 1
x = a1 * np.sin(a) * t * np.cos(t)
y = a1 * np.sin(a) * t * np.sin(t)
ax.plot(x, y, z, label='espiral conica de papus')
ax.legend()
plt.show()
pass
def Curva_de_Arquitas():
"""""
Curve type: Curve of Archytas
Members:
Nicolas Fernandez (@matiche)
Sebastian Mendez (@SebaMendez)
Cristobal Moreira (@cmoreirab)
Gabriel Lara (@Gabolara453)
Dennis Queirolo (@dennis-queirolo)
:return: curve of Archytas
"""
plt.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
# Prepare arrays x, y, z
theta = np.linspace(-10 * np.pi, 10 * np.pi, 100)
a = 4
t = 10
z = a * ((1 - np.cos(theta)) * np.cos(theta)) - (np.pi / 2 <= t <= np.pi / 2)
z = - a * ((1 - np.cos(theta)) * np.cos(theta)) - (np.pi / 2 <= t <= np.pi / 2)
x = a * np.cos(theta) ** 2
y = a * np.cos(theta) * np.sin(theta)
ax.plot(x, y, z, label=('Curva de Arquitas'))
ax.legend()
plt.show()
def horoptera():
# add your code here
pass
def Curva_Bicilindrica():
# add your code here
pass
if __name__ == '__main__':
# Window creation
root = tk.Tk()
root.wm_title("Proyecto de Fisica 2019/01")
root.geometry("800x600")
# Create the frame that holds the widgets
frame = tk.Frame(root)
frame.pack(padx=20, pady=20)
# Add the title
label = tk.Label(frame, text="Curvas Paramétricas Famosas", height="2")
label.pack(fill=tk.X, expand=1)
# Each group must use its own function
curva_de_ejemplo = tk.Button(master=frame, text="Curva de Ejemplo", command=curva_de_ejemplo)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Hélice Cónica", command=helice_conica)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Hélice Circular", command=helice_circular_1)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Corona Sinusoidal", command=Corona_Sinusoidal)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Curva de Viviani", command=curva_de_viviani)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Hipopoda", command=hipopoda_1)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Curva de Arquitas", command=Curva_de_Arquitas)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Curva Bicilindrica", command=Curva_Bicilindrica)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Conica de Papus", command=conica_de_papus)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
curva_de_ejemplo = tk.Button(master=frame, text="Horoptera", command=horoptera)
curva_de_ejemplo.pack(side=tk.BOTTOM, padx=10, pady=10)
tk.mainloop()
|
py | b40dbeb8d95f4c5dcdefd49eac45027355d3c35f | from .models import async_manager_func
__all__ = [
'async_manager_func',
]
|
py | b40dbf2a42141c1139eb9c25611404e974385992 | import re
from django import forms
from django.forms.models import fields_for_model
from nautobot.utilities.querysets import RestrictedQuerySet
from .constants import ALPHANUMERIC_EXPANSION_PATTERN, IP4_EXPANSION_PATTERN, IP6_EXPANSION_PATTERN
__all__ = (
"add_blank_choice",
"add_field_to_filter_form_class",
"expand_alphanumeric_pattern",
"expand_ipaddress_pattern",
"form_from_model",
"parse_alphanumeric_range",
"parse_numeric_range",
"restrict_form_fields",
"parse_csv",
"validate_csv",
)
def parse_numeric_range(string, base=10):
"""
Expand a numeric range (continuous or not) into a decimal or
hexadecimal list, as specified by the base parameter
'0-3,5' => [0, 1, 2, 3, 5]
'2,8-b,d,f' => [2, 8, 9, a, b, d, f]
"""
values = list()
for dash_range in string.split(","):
try:
begin, end = dash_range.split("-")
except ValueError:
begin, end = dash_range, dash_range
begin, end = int(begin.strip(), base=base), int(end.strip(), base=base) + 1
values.extend(range(begin, end))
return list(set(values))
def parse_alphanumeric_range(string):
"""
Expand an alphanumeric range (continuous or not) into a list.
'a-d,f' => [a, b, c, d, f]
'0-3,a-d' => [0, 1, 2, 3, a, b, c, d]
"""
values = []
for dash_range in string.split(","):
try:
begin, end = dash_range.split("-")
vals = begin + end
# Break out of loop if there's an invalid pattern to return an error
if (not (vals.isdigit() or vals.isalpha())) or (vals.isalpha() and not (vals.isupper() or vals.islower())):
return []
except ValueError:
begin, end = dash_range, dash_range
if begin.isdigit() and end.isdigit():
for n in list(range(int(begin), int(end) + 1)):
values.append(n)
else:
# Value-based
if begin == end:
values.append(begin)
# Range-based
else:
# Not a valid range (more than a single character)
if not len(begin) == len(end) == 1:
raise forms.ValidationError('Range "{}" is invalid.'.format(dash_range))
for n in list(range(ord(begin), ord(end) + 1)):
values.append(chr(n))
return values
def expand_alphanumeric_pattern(string):
"""
Expand an alphanumeric pattern into a list of strings, e.g. 'r[a-c]1' => ['ra1', 'rb1', 'rc1'].
"""
lead, pattern, remnant = re.split(ALPHANUMERIC_EXPANSION_PATTERN, string, maxsplit=1)
parsed_range = parse_alphanumeric_range(pattern)
for i in parsed_range:
if re.search(ALPHANUMERIC_EXPANSION_PATTERN, remnant):
for string in expand_alphanumeric_pattern(remnant):
yield "{}{}{}".format(lead, i, string)
else:
yield "{}{}{}".format(lead, i, remnant)
def expand_ipaddress_pattern(string, family):
"""
Expand an IP address pattern into a list of strings. Examples:
'192.0.2.[1,2,100-250]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.100/24' ... '192.0.2.250/24']
'2001:db8:0:[0,fd-ff]::/64' => ['2001:db8:0:0::/64', '2001:db8:0:fd::/64', ... '2001:db8:0:ff::/64']
"""
if family not in [4, 6]:
raise Exception("Invalid IP address family: {}".format(family))
if family == 4:
regex = IP4_EXPANSION_PATTERN
base = 10
else:
regex = IP6_EXPANSION_PATTERN
base = 16
lead, pattern, remnant = re.split(regex, string, maxsplit=1)
parsed_range = parse_numeric_range(pattern, base)
for i in parsed_range:
if re.search(regex, remnant):
for string in expand_ipaddress_pattern(remnant, family):
yield "".join([lead, format(i, "x" if family == 6 else "d"), string])
else:
yield "".join([lead, format(i, "x" if family == 6 else "d"), remnant])
def add_blank_choice(choices):
"""
Add a blank choice to the beginning of a choices list.
"""
return ((None, "---------"),) + tuple(choices)
def form_from_model(model, fields):
"""
Return a Form class with the specified fields derived from a model. This is useful when we need a form to be used
for creating objects, but want to avoid the model's validation (e.g. for bulk create/edit functions). All fields
are marked as not required.
"""
form_fields = fields_for_model(model, fields=fields)
for field in form_fields.values():
field.required = False
return type("FormFromModel", (forms.Form,), form_fields)
def restrict_form_fields(form, user, action="view"):
"""
Restrict all form fields which reference a RestrictedQuerySet. This ensures that users see only permitted objects
as available choices.
"""
for field in form.fields.values():
if hasattr(field, "queryset") and issubclass(field.queryset.__class__, RestrictedQuerySet):
field.queryset = field.queryset.restrict(user, action)
def parse_csv(reader):
"""
Parse a csv_reader object into a headers dictionary and a list of records dictionaries. Raise an error
if the records are formatted incorrectly. Return headers and records as a tuple.
"""
records = []
headers = {}
# Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an optional
# "to" field specifying how the related object is being referenced. For example, importing a Device might use a
# `site.slug` header, to indicate the related site is being referenced by its slug.
for header in next(reader):
if "." in header:
field, to_field = header.split(".", 1)
headers[field] = to_field
else:
headers[header] = None
# Parse CSV rows into a list of dictionaries mapped from the column headers.
for i, row in enumerate(reader, start=1):
if len(row) != len(headers):
raise forms.ValidationError(f"Row {i}: Expected {len(headers)} columns but found {len(row)}")
row = [col.strip() for col in row]
record = dict(zip(headers.keys(), row))
records.append(record)
return headers, records
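# Worked sketch: given the CSV text
#
#   name,site.slug,status
#   device1,ams01,active
#
# parse_csv(reader) returns headers == {"name": None, "site": "slug", "status": None}
# and records == [{"name": "device1", "site": "ams01", "status": "active"}].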
def validate_csv(headers, fields, required_fields):
"""
Validate that parsed csv data conforms to the object's available fields. Raise validation errors
if parsed csv data contains invalid headers or does not contain required headers.
"""
# Validate provided column headers
for field, to_field in headers.items():
if field not in fields:
raise forms.ValidationError(f'Unexpected column header "{field}" found.')
if to_field and not hasattr(fields[field], "to_field_name"):
raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
if to_field and not hasattr(fields[field].queryset.model, to_field):
raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')
# Validate required fields
for f in required_fields:
if f not in headers:
raise forms.ValidationError(f'Required column header "{f}" not found.')
def add_field_to_filter_form_class(form_class, field_name, field_obj):
"""
Attach a field to an existing filter form class.
"""
if not isinstance(field_obj, forms.Field):
raise TypeError(f"Custom form field `{field_name}` is not an instance of django.forms.Field.")
if field_name in form_class.base_fields:
raise AttributeError(
f"There was a conflict with filter form field `{field_name}`, the custom filter form field was ignored."
)
form_class.base_fields[field_name] = field_obj
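# A minimal usage sketch (the form class and field name are hypothetical):
#
#   add_field_to_filter_form_class(DeviceFilterForm, "cf_rack_position", forms.IntegerField(required=False))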
|
py | b40dbf6ca736f0e4767cd8ee78c2a97669c5622d | import json
from ..helpers import BaseApplicationTest
import mock
from nose.tools import assert_equal, assert_false
class TestStatus(BaseApplicationTest):
def setup(self):
super(TestStatus, self).setup()
self._data_api_client = mock.patch(
'app.status.views.data_api_client'
).start()
self._search_api_client = mock.patch(
'app.status.views.search_api_client'
).start()
def teardown(self):
self._data_api_client.stop()
self._search_api_client.stop()
@mock.patch('app.status.views.data_api_client')
def test_should_return_200_from_elb_status_check(self, data_api_client):
status_response = self.client.get('/_status?ignore-dependencies')
assert_equal(200, status_response.status_code)
assert_false(data_api_client.called)
def test_status_ok(self):
self._data_api_client.get_status.return_value = {
'status': 'ok'
}
self._search_api_client.get_status.return_value = {
'status': 'ok'
}
status_response = self.client.get('/_status')
assert_equal(200, status_response.status_code)
json_data = json.loads(status_response.get_data().decode('utf-8'))
assert_equal(
"ok", "{}".format(json_data['api_status']['status']))
assert_equal(
"ok", "{}".format(json_data['search_api_status']['status']))
def test_status_error_in_one_upstream_api(self):
self._data_api_client.get_status.return_value = {
'status': 'error',
'app_version': None,
'message': 'Cannot connect to Database'
}
self._search_api_client.get_status.return_value = {
'status': 'ok'
}
response = self.client.get('/_status')
assert_equal(500, response.status_code)
json_data = json.loads(response.get_data().decode('utf-8'))
assert_equal("error", "{}".format(json_data['status']))
assert_equal("error", "{}".format(json_data['api_status']['status']))
assert_equal("ok", "{}".format(
json_data['search_api_status']['status']))
def test_status_no_response_in_one_upstream_api(self):
self._data_api_client.get_status.return_value = {
'status': 'ok'
}
self._search_api_client.get_status.return_value = None
response = self.client.get('/_status')
assert_equal(500, response.status_code)
json_data = json.loads(response.get_data().decode('utf-8'))
assert_equal("error", "{}".format(json_data['status']))
assert_equal("ok", "{}".format(json_data['api_status']['status']))
assert_equal(None, json_data.get('search_api_status'))
def test_status_error_in_two_upstream_apis(self):
self._data_api_client.get_status.return_value = {
'status': 'error',
'app_version': None,
'message': 'Cannot connect to Database'
}
self._search_api_client.get_status.return_value = {
'status': 'error',
'app_version': None,
'message': 'Cannot connect to elasticsearch'
}
response = self.client.get('/_status')
assert_equal(500, response.status_code)
json_data = json.loads(response.get_data().decode('utf-8'))
assert_equal("error", "{}".format(json_data['status']))
assert_equal("error", "{}".format(json_data['api_status']['status']))
assert_equal("error", "{}".format(
json_data['search_api_status']['status']))
|
py | b40dbf9a27db1fe73164f43d204c90233e759b0f | from django.urls import path
from share import views
urlpatterns = [
path('makePublic', views.makePublic, name="makePublic"),
path('makePrivate', views.makePrivate, name="makePrivate"),
path('<slug:slug>/', views.share, name="share"),
]
|
py | b40dbff861202129fdfa9a15f3f0467f31c53bb3 | import numpy as np
import igl
from context import gpytoolbox
# This test crashes if you use the igl binding.
# This is a cube, centered at the origin, with side length 1
v,f = igl.read_triangle_mesh("unit_tests_data/cube.obj")
cam_pos = np.array([[1,0.1,0.1],[1,0.1,0.0]])
cam_dir = np.array([[-1,0,0],[-1,0,0]])
t, ids, l = gpytoolbox.ray_mesh_intersect(cam_pos.astype(np.float64),cam_dir.astype(np.float64),v.astype(np.float64),f.astype(np.int32))
# There should only be two hits (there are three because the C++ ray_mesh_intersect doesn't work well?)
print("Number of hits:", t.shape[0])
# t (travelled distance) should be 0.5
print("Travelled distance:", t[0])
# intersection should be [0.5,0.1,0.1]
intersection = cam_pos + t[:,None]*cam_dir
print("Intersected at: ", intersection) |
py | b40dc12736d099cf08e73998c3de930347278d28 | # Custom extension that zips all the examples for download.
from zipfile import ZipFile
import fnmatch
from os import path, walk
my_path = path.dirname(path.abspath(__file__))
root_path = path.abspath(path.join(my_path, '..', '..'))
zip_filename = path.join(root_path, 'docs', 'examples.zip')
def my_glob(folder, ext):
""" glob does not work recursively in all python versions """
for root, dirnames, filenames in walk(folder):
for filename in fnmatch.filter(filenames, ext):
yield path.join(root, filename)
def zip_examples(app):
glob_patterns = [
("examples", "*.c3"),
("examples", "*.asm"),
("examples", "*.mmap"),
("examples", "build.xml"),
("librt", "*.c3")]
with ZipFile(zip_filename, 'w') as myzip:
for folder, ext in glob_patterns:
pat = path.join(path.join(root_path, folder))
for filename in my_glob(pat, ext):
zfn = path.relpath(filename, root_path)
app.info('zipping {} as {}'.format(filename, zfn))
myzip.write(filename, zfn)
def setup(app):
app.connect('builder-inited', zip_examples)
return {'version': '0.1'}
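# A hedged note on wiring this up: Sphinx loads the extension through conf.py, e.g.
# extensions = ['zip_examples'] (the exact dotted path depends on where this module lives).
# The 'builder-inited' event fires after the builder is created and before any documents are
# read, so examples.zip exists by the time the docs reference it.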
|
py | b40dc1cd2c07fbecf3f8b6e9e1f943edf2f6030a | # -*- coding: utf-8 -*-
""" Implements a Class for Representing a Simulated Senate Election. """
from collections import Counter
from random import random
from random import seed as set_seed
from time import asctime
from time import localtime
from aus_senate_audit.senate_election.base_senate_election import BaseSenateElection
class SimulatedSenateElection(BaseSenateElection):
""" Implements a class for representing a simulated senate election.
:ivar int _sample_increment_size: The number of ballots to add to the growing sample during each audit stage.
NOTE: The :attr:`_candidates` and :attr:`_candidate_ids` instance attributes are set as a [1, ..., :attr:`_m`].
"""
TYPE = 'Simulated'
DEFAULT_ID = 'SimulatedElection{}'
def __init__(self, seed, n, m, sample_increment_size):
""" Initializes a :class:`SimulatedSenateElection` object.
The number of seats in a simulated senate election is equal to the floor of the number of candidates in the
election divided by two.
:param int seed: The starting value for the random number generator.
:param int n: The total number of ballots cast in the election.
:param int m: The total number of candidates in the election.
:param int sample_increment_size: The number of ballots to add to the growing sample during each audit stage.
"""
super(SimulatedSenateElection, self).__init__()
self._n = n
self._m = m
self._seats = int(self._m / 2)
self._candidates = list(range(1, self._m + 1))
self._candidate_ids = list(range(1, self._m + 1))
self._election_id = SimulatedSenateElection.DEFAULT_ID.format(asctime(localtime()))
self._sample_increment_size = sample_increment_size
set_seed(seed) # Set the initial value of the RNG.
def draw_ballots(self):
""" Adds simulated ballots to the sample of ballots drawn thus far.
These ballots are biased so (1, 2, ..., m) is likely to be the winner. More precisely, on each ballot, candidate `i`
is given the value `i + v * U`, where `U = uniform(0, 1)` and `v` is the noise level. The candidates are then
sorted into increasing order by these values. Note that the total number of ballots drawn may not exceed the
total number of cast votes, :attr:`_n`.
"""
v = self._m / 2.0 # Noise level to control position variance.
batch_size = min(self._sample_increment_size, self._n - self._num_ballots_drawn)
for _ in range(batch_size):
candidate_values = [(i + v * random(), cid) for i, cid in enumerate(self._candidate_ids)]
ballot = tuple(cid for val, cid in sorted(candidate_values))
self.add_ballot(ballot, 1)
def get_outcome(self, ballot_weights):
""" Returns the outcome of a senate election with the given ballot weights.
The social choice function used in the simulated senate election is Borda count.
:param :class:`Counter` ballot_weights: A mapping from a ballot type to the number of ballots drawn of that
type.
:returns: The IDs of the candidates elected to the available seats, sorted in lexicographical order.
:rtype: tuple
"""
counter = Counter()
for ballot, weight in ballot_weights.items():
for i, cid in enumerate(ballot):
counter[cid] += weight * i
# Get the :attr:`_seat` candidates with the lowest Borda counts in increasing order.
winners = counter.most_common()[-self._seats:][::-1]
return tuple(sorted([cid for cid, count in winners]))
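# Worked sketch: with ballot_weights == Counter({(1, 2, 3): 2, (2, 1, 3): 1}) and a single
# seat, the Borda totals are candidate 1 -> 0*2 + 1*1 = 1, candidate 2 -> 1*2 + 0*1 = 2,
# candidate 3 -> 2*2 + 2*1 = 6; the lowest total wins, so get_outcome returns (1,).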
|
py | b40dc27bde0f31e70468b6e4742e595dec2f9e35 | #!/usr/bin/env python
#
# This program shows how to use Scatterv. Each processor gets a
# different amount of data from the root processor. We use MPI_Gather
# first to tell the root how much data is going to be sent.
#
import numpy
from numpy import *
from mpi4py import MPI
import sys
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
mpi_root=0
# Three step process for setting up the call to Scatterv
# Each processor gets different amounts of data in
# the Scatterv
# Step 1
# Here we set up the amount of data each will get.
mysize=2*myid+2
# Step 2
# Send the different numbers of values from each processor to the
# root
#mysize contains the number of values we will send in the scatterv
counts=comm.gather(mysize,root=mpi_root)
if myid != mpi_root:
displacements=None
sray=None
else:
# Step 3
# We set up the displacement array. This says where in our
# final array each processor will get its data. For the
# normal simple case this is just a running total of the
# counts array and thus is optional. See P_ex08.py for
# usage of the displacements array
print("counts=",counts)
displacements=zeros(numprocs,"i")
displacements[0]=0
for i in range(1, numprocs):
displacements[i]=counts[i-1]+displacements[i-1]
size=0
for i in range(0, numprocs):
size=size+counts[i]
sray=zeros(size,"i")
for i in range(0, size):
sray[i]=i
allray=empty(mysize,"i")
comm.Scatterv(sendbuf=[sray, (counts)], recvbuf=[allray, (mysize)], root=mpi_root)
print(myid,"allrray= ",allray)
MPI.Finalize()
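# Worked sketch: with 3 processes, mysize is 2, 4 and 6 on ranks 0, 1 and 2, so the root
# gathers counts == [2, 4, 6] and computes displacements == [0, 2, 6]; Scatterv then hands
# rank 0 the values [0, 1], rank 1 [2, 3, 4, 5] and rank 2 [6, 7, 8, 9, 10, 11].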
|
py | b40dc2e3448a9ed09f69528de5b93e2a4a207893 | import PyPDF2
import re
pdf_file = open('coroa3.pdf', 'rb')
read_pdf = PyPDF2.PdfFileReader(pdf_file)
number_of_pages = read_pdf.getNumPages()
page = read_pdf.getPage(35).extractText()
pdf_file.close()
#page_content = page.extractText()
##parsed = ''.join(page_content)
##parsed = re.sub('\n', '', parsed)
print(len(page))
|
py | b40dc345a447a63ee1d8764e525f4aae67318e96 | # This program checks that a machine is up by sending it a ping. It shows if the ping to a device was "ok" or "unsuccessful".
from subprocess import check_call, CalledProcessError,PIPE
import time
def pingiprange():
# Program principle:
print("This program will run a Ping request every 5 seconds on a round of IP's until told to stop (using ctrl+c).")
# How many IPs do we need to monitor?
n = int(input("How many IP's are we checking: "))
ips = []
while n<1:
# Handle the case where a number of IPs lower than 1 was entered.
n = int(input("Please input a number above 0 :"))
if n>0:
ips = [input("Enter IP number {}: ".format(i)) for i in range(1, n + 1)]
while True:
# Send a ping to each IP that was entered:
for ip in ips:
try:
out = check_call(['ping', '-n', '2', ip],stdout=PIPE)
except CalledProcessError as e:
# If there is no reply to the ping:
print("Ping to {} unsuccessful".format(ip))
continue
# If we get here, the ping succeeded:
print("Ping to {} ok".format(ip))
print("Ctrl+c to stop this program.")
time.sleep(3)
|
py | b40dc3508340234d3abf638ddc9bacdceef4feb4 | #!/usr/bin/env python3
import singer
import tap_framework
from tap_lever.client import LeverClient
from tap_lever.streams import AVAILABLE_STREAMS
LOGGER = singer.get_logger() # noqa
class LeverRunner(tap_framework.Runner):
pass
@singer.utils.handle_top_exception(LOGGER)
def main():
args = singer.utils.parse_args(required_config_keys=['token'])
client = LeverClient(args.config)
runner = LeverRunner(
args, client, AVAILABLE_STREAMS)
if args.discover:
runner.do_discover()
else:
runner.do_sync()
if __name__ == '__main__':
main()
|
py | b40dc3738498df8254303b3c85cf245d0da0b85e | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2018 CTS Developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "cts.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
ctsd and cts-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run ctsd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "ctsd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "cts-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in cts.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a ctsd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "ctsd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "cts-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple ctsds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
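# Worked sketch: make_change(node, amount_in=Decimal("10"), amount_out=Decimal("3"),
# fee=Decimal("0.1")) sees change = 6.9 > 2 * 3.1, so it returns two change outputs of
# 3.45 each, sent to two fresh addresses obtained from getnewaddress().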
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
py | b40dc412859d9f76e6288792167077733c76e3bb | # coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from influxdb_client.domain.view_properties import ViewProperties
class HistogramViewProperties(ViewProperties):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'queries': 'list[DashboardQuery]',
'colors': 'list[DashboardColor]',
'shape': 'str',
'note': 'str',
'show_note_when_empty': 'bool',
'x_column': 'str',
'fill_columns': 'list[str]',
'x_domain': 'list[float]',
'x_axis_label': 'str',
'position': 'str',
'bin_count': 'int',
'legend_colorize_rows': 'bool',
'legend_hide': 'bool',
'legend_opacity': 'float',
'legend_orientation_threshold': 'int'
}
attribute_map = {
'type': 'type',
'queries': 'queries',
'colors': 'colors',
'shape': 'shape',
'note': 'note',
'show_note_when_empty': 'showNoteWhenEmpty',
'x_column': 'xColumn',
'fill_columns': 'fillColumns',
'x_domain': 'xDomain',
'x_axis_label': 'xAxisLabel',
'position': 'position',
'bin_count': 'binCount',
'legend_colorize_rows': 'legendColorizeRows',
'legend_hide': 'legendHide',
'legend_opacity': 'legendOpacity',
'legend_orientation_threshold': 'legendOrientationThreshold'
}
def __init__(self, type=None, queries=None, colors=None, shape=None, note=None, show_note_when_empty=None, x_column=None, fill_columns=None, x_domain=None, x_axis_label=None, position=None, bin_count=None, legend_colorize_rows=None, legend_hide=None, legend_opacity=None, legend_orientation_threshold=None): # noqa: E501,D401,D403
"""HistogramViewProperties - a model defined in OpenAPI.""" # noqa: E501
ViewProperties.__init__(self) # noqa: E501
self._type = None
self._queries = None
self._colors = None
self._shape = None
self._note = None
self._show_note_when_empty = None
self._x_column = None
self._fill_columns = None
self._x_domain = None
self._x_axis_label = None
self._position = None
self._bin_count = None
self._legend_colorize_rows = None
self._legend_hide = None
self._legend_opacity = None
self._legend_orientation_threshold = None
self.discriminator = None
self.type = type
self.queries = queries
self.colors = colors
self.shape = shape
self.note = note
self.show_note_when_empty = show_note_when_empty
self.x_column = x_column
self.fill_columns = fill_columns
self.x_domain = x_domain
self.x_axis_label = x_axis_label
self.position = position
self.bin_count = bin_count
if legend_colorize_rows is not None:
self.legend_colorize_rows = legend_colorize_rows
if legend_hide is not None:
self.legend_hide = legend_hide
if legend_opacity is not None:
self.legend_opacity = legend_opacity
if legend_orientation_threshold is not None:
self.legend_orientation_threshold = legend_orientation_threshold
@property
def type(self):
"""Get the type of this HistogramViewProperties.
:return: The type of this HistogramViewProperties.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this HistogramViewProperties.
:param type: The type of this HistogramViewProperties.
:type: str
""" # noqa: E501
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def queries(self):
"""Get the queries of this HistogramViewProperties.
:return: The queries of this HistogramViewProperties.
:rtype: list[DashboardQuery]
""" # noqa: E501
return self._queries
@queries.setter
def queries(self, queries):
"""Set the queries of this HistogramViewProperties.
:param queries: The queries of this HistogramViewProperties.
:type: list[DashboardQuery]
""" # noqa: E501
if queries is None:
raise ValueError("Invalid value for `queries`, must not be `None`") # noqa: E501
self._queries = queries
@property
def colors(self):
"""Get the colors of this HistogramViewProperties.
Colors define color encoding of data into a visualization
:return: The colors of this HistogramViewProperties.
:rtype: list[DashboardColor]
""" # noqa: E501
return self._colors
@colors.setter
def colors(self, colors):
"""Set the colors of this HistogramViewProperties.
Colors define color encoding of data into a visualization
:param colors: The colors of this HistogramViewProperties.
:type: list[DashboardColor]
""" # noqa: E501
if colors is None:
raise ValueError("Invalid value for `colors`, must not be `None`") # noqa: E501
self._colors = colors
@property
def shape(self):
"""Get the shape of this HistogramViewProperties.
:return: The shape of this HistogramViewProperties.
:rtype: str
""" # noqa: E501
return self._shape
@shape.setter
def shape(self, shape):
"""Set the shape of this HistogramViewProperties.
:param shape: The shape of this HistogramViewProperties.
:type: str
""" # noqa: E501
if shape is None:
raise ValueError("Invalid value for `shape`, must not be `None`") # noqa: E501
self._shape = shape
@property
def note(self):
"""Get the note of this HistogramViewProperties.
:return: The note of this HistogramViewProperties.
:rtype: str
""" # noqa: E501
return self._note
@note.setter
def note(self, note):
"""Set the note of this HistogramViewProperties.
:param note: The note of this HistogramViewProperties.
:type: str
""" # noqa: E501
if note is None:
raise ValueError("Invalid value for `note`, must not be `None`") # noqa: E501
self._note = note
@property
def show_note_when_empty(self):
"""Get the show_note_when_empty of this HistogramViewProperties.
If true, will display note when empty
:return: The show_note_when_empty of this HistogramViewProperties.
:rtype: bool
""" # noqa: E501
return self._show_note_when_empty
@show_note_when_empty.setter
def show_note_when_empty(self, show_note_when_empty):
"""Set the show_note_when_empty of this HistogramViewProperties.
If true, will display note when empty
:param show_note_when_empty: The show_note_when_empty of this HistogramViewProperties.
:type: bool
""" # noqa: E501
if show_note_when_empty is None:
raise ValueError("Invalid value for `show_note_when_empty`, must not be `None`") # noqa: E501
self._show_note_when_empty = show_note_when_empty
@property
def x_column(self):
"""Get the x_column of this HistogramViewProperties.
:return: The x_column of this HistogramViewProperties.
:rtype: str
""" # noqa: E501
return self._x_column
@x_column.setter
def x_column(self, x_column):
"""Set the x_column of this HistogramViewProperties.
:param x_column: The x_column of this HistogramViewProperties.
:type: str
""" # noqa: E501
if x_column is None:
raise ValueError("Invalid value for `x_column`, must not be `None`") # noqa: E501
self._x_column = x_column
@property
def fill_columns(self):
"""Get the fill_columns of this HistogramViewProperties.
:return: The fill_columns of this HistogramViewProperties.
:rtype: list[str]
""" # noqa: E501
return self._fill_columns
@fill_columns.setter
def fill_columns(self, fill_columns):
"""Set the fill_columns of this HistogramViewProperties.
:param fill_columns: The fill_columns of this HistogramViewProperties.
:type: list[str]
""" # noqa: E501
if fill_columns is None:
raise ValueError("Invalid value for `fill_columns`, must not be `None`") # noqa: E501
self._fill_columns = fill_columns
@property
def x_domain(self):
"""Get the x_domain of this HistogramViewProperties.
:return: The x_domain of this HistogramViewProperties.
:rtype: list[float]
""" # noqa: E501
return self._x_domain
@x_domain.setter
def x_domain(self, x_domain):
"""Set the x_domain of this HistogramViewProperties.
:param x_domain: The x_domain of this HistogramViewProperties.
:type: list[float]
""" # noqa: E501
if x_domain is None:
raise ValueError("Invalid value for `x_domain`, must not be `None`") # noqa: E501
self._x_domain = x_domain
@property
def x_axis_label(self):
"""Get the x_axis_label of this HistogramViewProperties.
:return: The x_axis_label of this HistogramViewProperties.
:rtype: str
""" # noqa: E501
return self._x_axis_label
@x_axis_label.setter
def x_axis_label(self, x_axis_label):
"""Set the x_axis_label of this HistogramViewProperties.
:param x_axis_label: The x_axis_label of this HistogramViewProperties.
:type: str
""" # noqa: E501
if x_axis_label is None:
raise ValueError("Invalid value for `x_axis_label`, must not be `None`") # noqa: E501
self._x_axis_label = x_axis_label
@property
def position(self):
"""Get the position of this HistogramViewProperties.
:return: The position of this HistogramViewProperties.
:rtype: str
""" # noqa: E501
return self._position
@position.setter
def position(self, position):
"""Set the position of this HistogramViewProperties.
:param position: The position of this HistogramViewProperties.
:type: str
""" # noqa: E501
if position is None:
raise ValueError("Invalid value for `position`, must not be `None`") # noqa: E501
self._position = position
@property
def bin_count(self):
"""Get the bin_count of this HistogramViewProperties.
:return: The bin_count of this HistogramViewProperties.
:rtype: int
""" # noqa: E501
return self._bin_count
@bin_count.setter
def bin_count(self, bin_count):
"""Set the bin_count of this HistogramViewProperties.
:param bin_count: The bin_count of this HistogramViewProperties.
:type: int
""" # noqa: E501
if bin_count is None:
raise ValueError("Invalid value for `bin_count`, must not be `None`") # noqa: E501
self._bin_count = bin_count
@property
def legend_colorize_rows(self):
"""Get the legend_colorize_rows of this HistogramViewProperties.
:return: The legend_colorize_rows of this HistogramViewProperties.
:rtype: bool
""" # noqa: E501
return self._legend_colorize_rows
@legend_colorize_rows.setter
def legend_colorize_rows(self, legend_colorize_rows):
"""Set the legend_colorize_rows of this HistogramViewProperties.
:param legend_colorize_rows: The legend_colorize_rows of this HistogramViewProperties.
:type: bool
""" # noqa: E501
self._legend_colorize_rows = legend_colorize_rows
@property
def legend_hide(self):
"""Get the legend_hide of this HistogramViewProperties.
:return: The legend_hide of this HistogramViewProperties.
:rtype: bool
""" # noqa: E501
return self._legend_hide
@legend_hide.setter
def legend_hide(self, legend_hide):
"""Set the legend_hide of this HistogramViewProperties.
:param legend_hide: The legend_hide of this HistogramViewProperties.
:type: bool
""" # noqa: E501
self._legend_hide = legend_hide
@property
def legend_opacity(self):
"""Get the legend_opacity of this HistogramViewProperties.
:return: The legend_opacity of this HistogramViewProperties.
:rtype: float
""" # noqa: E501
return self._legend_opacity
@legend_opacity.setter
def legend_opacity(self, legend_opacity):
"""Set the legend_opacity of this HistogramViewProperties.
:param legend_opacity: The legend_opacity of this HistogramViewProperties.
:type: float
""" # noqa: E501
self._legend_opacity = legend_opacity
@property
def legend_orientation_threshold(self):
"""Get the legend_orientation_threshold of this HistogramViewProperties.
:return: The legend_orientation_threshold of this HistogramViewProperties.
:rtype: int
""" # noqa: E501
return self._legend_orientation_threshold
@legend_orientation_threshold.setter
def legend_orientation_threshold(self, legend_orientation_threshold):
"""Set the legend_orientation_threshold of this HistogramViewProperties.
:param legend_orientation_threshold: The legend_orientation_threshold of this HistogramViewProperties.
:type: int
""" # noqa: E501
self._legend_orientation_threshold = legend_orientation_threshold
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, HistogramViewProperties):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
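if __name__ == "__main__":
    # Illustrative sketch, not part of the generated client: build a minimal
    # histogram view and round-trip it through to_dict()/to_str(). All field
    # values below are placeholders chosen for the example, not API defaults.
    example = HistogramViewProperties(
        type="histogram", queries=[], colors=[], shape="chronograf-v2",
        note="", show_note_when_empty=False, x_column="_value",
        fill_columns=[], x_domain=[0.0, 100.0], x_axis_label="value",
        position="stacked", bin_count=30)
    print(example.to_str())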
|
py | b40dc41d821cb463f4d961653d772dcbdde25691 | test = { 'name': 'q24',
'points': 4,
'suites': [ { 'cases': [ { 'code': '>>> pivot_GlobalSales.shape\n'
'(17, 12)',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
py | b40dc472948f7133d5f1afc7fd72dde450761ca9 | from commands import add_to_friends_on_chat_enter
from commands import aliases
from commands import aliases_manager
from commands import auto_exit_from_chat
from commands import delete_messages
from commands import delete_messages_vks
from commands import delete_notify
from commands import disable_notifications
from commands import duty_signal
from commands import info
from commands import members_manager
from commands import ping
from commands import prefixes
from commands import regex_deleter
from commands import repeat
from commands import role_play_commands
from commands import run_eval
from commands import self_signal
from commands import set_secret_code
from commands import sloumo
commands_bp = (
add_to_friends_on_chat_enter.user,
aliases.user,
aliases_manager.user,
auto_exit_from_chat.user,
delete_messages.user,
delete_messages_vks.user,
delete_notify.user,
disable_notifications.user,
duty_signal.user,
run_eval.user,
ping.user,
info.user,
prefixes.user,
regex_deleter.user,
repeat.user,
role_play_commands.user,
self_signal.user,
set_secret_code.user,
sloumo.user,
*members_manager.users_bp,
)
|
py | b40dc4eb346f61290c665fc843aaeaaea3ed4f93 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Workspace']
class Workspace(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
public_network_access_for_ingestion: Optional[pulumi.Input[str]] = None,
public_network_access_for_query: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
retention_in_days: Optional[pulumi.Input[int]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['WorkspaceSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_capping: Optional[pulumi.Input[pulumi.InputType['WorkspaceCappingArgs']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The top level Workspace resource container.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] e_tag: The ETag of the workspace.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] provisioning_state: The provisioning state of the workspace.
:param pulumi.Input[str] public_network_access_for_ingestion: The network access type for accessing Log Analytics ingestion.
:param pulumi.Input[str] public_network_access_for_query: The network access type for accessing Log Analytics query.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[int] retention_in_days: The workspace data retention in days. -1 means Unlimited retention for the Unlimited Sku. 730 days is the maximum allowed for all other Skus.
:param pulumi.Input[pulumi.InputType['WorkspaceSkuArgs']] sku: The SKU of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['WorkspaceCappingArgs']] workspace_capping: The daily volume cap for ingestion.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['e_tag'] = e_tag
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['provisioning_state'] = provisioning_state
__props__['public_network_access_for_ingestion'] = public_network_access_for_ingestion
__props__['public_network_access_for_query'] = public_network_access_for_query
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['retention_in_days'] = retention_in_days
__props__['sku'] = sku
__props__['tags'] = tags
__props__['workspace_capping'] = workspace_capping
if workspace_name is None:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['customer_id'] = None
__props__['name'] = None
__props__['private_link_scoped_resources'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:operationalinsights/latest:Workspace"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20151101preview:Workspace"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200801:Workspace")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Workspace, __self__).__init__(
'azure-nextgen:operationalinsights/v20200301preview:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="customerId")
def customer_id(self) -> pulumi.Output[str]:
"""
This is a read-only property. Represents the ID associated with the workspace.
"""
return pulumi.get(self, "customer_id")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
The ETag of the workspace.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateLinkScopedResources")
def private_link_scoped_resources(self) -> pulumi.Output[Sequence['outputs.PrivateLinkScopedResourceResponse']]:
"""
List of linked private link scope resources.
"""
return pulumi.get(self, "private_link_scoped_resources")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the workspace.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccessForIngestion")
def public_network_access_for_ingestion(self) -> pulumi.Output[Optional[str]]:
"""
The network access type for accessing Log Analytics ingestion.
"""
return pulumi.get(self, "public_network_access_for_ingestion")
@property
@pulumi.getter(name="publicNetworkAccessForQuery")
def public_network_access_for_query(self) -> pulumi.Output[Optional[str]]:
"""
The network access type for accessing Log Analytics query.
"""
return pulumi.get(self, "public_network_access_for_query")
@property
@pulumi.getter(name="retentionInDays")
def retention_in_days(self) -> pulumi.Output[Optional[int]]:
"""
The workspace data retention in days. -1 means Unlimited retention for the Unlimited Sku. 730 days is the maximum allowed for all other Skus.
"""
return pulumi.get(self, "retention_in_days")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.WorkspaceSkuResponse']]:
"""
The SKU of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="workspaceCapping")
def workspace_capping(self) -> pulumi.Output[Optional['outputs.WorkspaceCappingResponse']]:
"""
The daily volume cap for ingestion.
"""
return pulumi.get(self, "workspace_capping")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
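# Illustrative sketch (assumed module path and values), showing how this
# resource is typically declared from a Pulumi program; the resource group,
# location and SKU below are placeholders, not defaults of this module.
#
#   import pulumi_azure_nextgen.operationalinsights.v20200301preview as oi
#
#   workspace = oi.Workspace(
#       "exampleWorkspace",
#       location="westeurope",
#       resource_group_name="example-rg",
#       retention_in_days=30,
#       sku=oi.WorkspaceSkuArgs(name="PerGB2018"),
#       workspace_name="example-workspace")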
|
py | b40dc55d7920516ac47956a29ca5a54e02b7d675 | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
from climateeconomics.core.core_dice.geophysical_model import CarbonEmissions
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import pandas as pd
class CarbonemissionsDiscipline(SoSDiscipline):
"carbonemissions discipline for DICE"
# ontology information
_ontology_data = {
'label': 'Carbon Emissions DICE Model',
'type': 'Research',
'source': 'SoSTrades Project',
'validated': '',
'validated_by': 'SoSTrades Project',
'last_modification_date': '',
'category': '',
'definition': '',
'icon': 'fas fa-smog fa-fw',
'version': '',
}
_maturity = 'Research'
DESC_IN = {
'year_start': {'type': 'int', 'unit': 'year', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'year_end': {'type': 'int', 'unit': 'year', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'time_step': {'type': 'int', 'unit': 'years per period', 'visibility': 'Shared', 'namespace': 'ns_dice'},
'init_land_emissions': {'type': 'float', 'unit': 'GtCO2 per year', 'default': 2.6},
'decline_rate_land_emissions': {'type': 'float', 'default': .115},
'init_cum_land_emisisons': {'type': 'float', 'unit': 'GtCO2', 'default': 100},
'init_gr_sigma': {'type': 'float', 'default': -0.0152},
'decline_rate_decarbo': {'type': 'float', 'default': -0.001},
'init_indus_emissions': {'type': 'float', 'unit': 'GtCO2 per year', 'default': 35.745},
'init_gross_output': {'type': 'float', 'unit': 'trillions $', 'visibility': 'Shared', 'namespace': 'ns_dice', 'default': 105.1},
'init_cum_indus_emissions': {'type': 'float', 'unit': 'GtCO2', 'default': 400},
'economics_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'},
'emissions_control_rate': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario',
'dataframe_descriptor': {'year': ('float', None, False), 'value': ('float', None, True)},
'dataframe_edition_locked': False}
}
DESC_OUT = {
'emissions_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_scenario'}
}
def run(self):
# Get inputs
in_dict = self.get_sosdisc_inputs()
emissions_control_rate = in_dict.pop('emissions_control_rate')
# Compute de emissions_model
emissions_model = CarbonEmissions(in_dict)
emissions_df = emissions_model.compute(in_dict, emissions_control_rate)
# Warning : float are mandatory for MDA ...
emissions_df = emissions_df.astype(float)
# Store output data
dict_values = {'emissions_df': emissions_df}
self.store_sos_outputs_values(dict_values)
def get_chart_filter_list(self):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
chart_filters = []
chart_list = ['carbon emission', 'emission control rate']
# First filter to deal with the view : program or actor
chart_filters.append(ChartFilter(
'Charts', chart_list, chart_list, 'charts'))
return chart_filters
def get_post_processing_list(self, chart_filters=None):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
instanciated_charts = []
# Overload default value with chart filter
if chart_filters is not None:
for chart_filter in chart_filters:
if chart_filter.filter_key == 'charts':
chart_list = chart_filter.selected_values
emissions_df = self.get_sosdisc_outputs('emissions_df')
emissions_df = resize_df(emissions_df)
if 'carbon emission' in chart_list:
to_plot = ['total_emissions', 'land_emissions', 'indus_emissions']
#emissions_df = discipline.get_sosdisc_outputs('emissions_df')
total_emission = emissions_df['total_emissions']
years = list(emissions_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
max_value = total_emission.values.max()
chart_name = 'total carbon emissions'
new_chart = TwoAxesInstanciatedChart('years', 'carbon emissions (Gtc)',
[year_start - 5, year_end + 5], [
0, max_value * 1.1],
chart_name)
for key in to_plot:
visible_line = True
c_emission = list(emissions_df[key])
new_series = InstanciatedSeries(
years, c_emission, key, 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'emission control rate' in chart_list:
to_plot = ['emissions_control_rate']
inputs = self.get_sosdisc_inputs()
control_rate_df = inputs.pop('emissions_control_rate')
control_rate = list(control_rate_df['value'])
emissions_df = self.get_sosdisc_outputs('emissions_df')
total_emission = emissions_df['total_emissions']
years = list(total_emission.index)
year_start = years[0]
year_end = years[len(years) - 1]
max_value = max(control_rate)
chart_name = 'emission control rate over the years'
new_chart = TwoAxesInstanciatedChart('years', 'emission control rate',
[year_start - 5, year_end + 5], [
0, max_value * 1.1],
chart_name)
for key in to_plot:
visible_line = True
new_series = InstanciatedSeries(
years, control_rate, 'emissions_control_rate', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
return instanciated_charts
def resize_df(df):
index = df.index
i = len(index) - 1
key = df.keys()
to_check = df.loc[index[i], key[0]]
while to_check == 0:
i = i - 1
to_check = df.loc[index[i], key[0]]
size_diff = len(index) - i
new_df = pd.DataFrame()
if size_diff == 0:
new_df = df
else:
for element in key:
new_df[element] = df[element][0:i + 1]
new_df.index = index[0: i + 1]
return new_df
def resize_array(array):
i = len(array) - 1
to_check = array[i]
while to_check == 0:
i = i - 1
to_check = array[i]
size_diff = len(array) - i
new_array = array[0:i + 1]
return new_array
def resize_index(index, array):
l = len(array)
new_index = index[0:l]
return new_index
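if __name__ == "__main__":
    # Illustrative sketch, not part of the discipline: resize_df drops the
    # trailing rows whose first column is zero. The sample frame is made up.
    sample = pd.DataFrame({'total_emissions': [1.0, 2.0, 0.0, 0.0],
                           'indus_emissions': [0.5, 1.0, 0.0, 0.0]},
                          index=[2020, 2021, 2022, 2023])
    print(resize_df(sample))  # keeps only the 2020 and 2021 rows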
|
py | b40dc5aee79bcf1042902cc79bb677ff8d623688 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui/mainwindow.ui'
#
# Created: Mon Aug 24 09:59:26 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(996, 715)
self.centralWidget = QtGui.QWidget(MainWindow)
self.centralWidget.setObjectName(_fromUtf8("centralWidget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.centralWidget)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.data_input_group_box = QtGui.QGroupBox(self.centralWidget)
self.data_input_group_box.setMaximumSize(QtCore.QSize(16777215, 281))
font = QtGui.QFont()
font.setPointSize(14)
self.data_input_group_box.setFont(font)
self.data_input_group_box.setObjectName(_fromUtf8("data_input_group_box"))
self.horizontalLayout = QtGui.QHBoxLayout(self.data_input_group_box)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.offsetMarkerTableView = QtGui.QTableView(self.data_input_group_box)
self.offsetMarkerTableView.setObjectName(_fromUtf8("offsetMarkerTableView"))
self.horizontalLayout.addWidget(self.offsetMarkerTableView)
self.off_marker_button_vlayout = QtGui.QVBoxLayout()
self.off_marker_button_vlayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.off_marker_button_vlayout.setObjectName(_fromUtf8("off_marker_button_vlayout"))
self.moveOffsetMarkerUpButton = QtGui.QPushButton(self.data_input_group_box)
self.moveOffsetMarkerUpButton.setObjectName(_fromUtf8("moveOffsetMarkerUpButton"))
self.off_marker_button_vlayout.addWidget(self.moveOffsetMarkerUpButton)
self.moveOffsetMarkerDownButton = QtGui.QPushButton(self.data_input_group_box)
self.moveOffsetMarkerDownButton.setObjectName(_fromUtf8("moveOffsetMarkerDownButton"))
self.off_marker_button_vlayout.addWidget(self.moveOffsetMarkerDownButton)
spacerItem = QtGui.QSpacerItem(28, 18, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.off_marker_button_vlayout.addItem(spacerItem)
self.addOffsetMarkerButton = QtGui.QPushButton(self.data_input_group_box)
self.addOffsetMarkerButton.setObjectName(_fromUtf8("addOffsetMarkerButton"))
self.off_marker_button_vlayout.addWidget(self.addOffsetMarkerButton)
self.removeOffsetMarkerButton = QtGui.QPushButton(self.data_input_group_box)
self.removeOffsetMarkerButton.setObjectName(_fromUtf8("removeOffsetMarkerButton"))
self.off_marker_button_vlayout.addWidget(self.removeOffsetMarkerButton)
self.horizontalLayout.addLayout(self.off_marker_button_vlayout)
self.verticalLayout_2.addWidget(self.data_input_group_box)
self.run_config_box = QtGui.QGroupBox(self.centralWidget)
self.run_config_box.setMaximumSize(QtCore.QSize(16777215, 110))
font = QtGui.QFont()
font.setPointSize(14)
self.run_config_box.setFont(font)
self.run_config_box.setObjectName(_fromUtf8("run_config_box"))
self.verticalLayout = QtGui.QVBoxLayout(self.run_config_box)
self.verticalLayout.setContentsMargins(2, 4, 2, 4)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.importButton = QtGui.QPushButton(self.run_config_box)
self.importButton.setObjectName(_fromUtf8("importButton"))
self.horizontalLayout_2.addWidget(self.importButton)
self.exportButton = QtGui.QPushButton(self.run_config_box)
self.exportButton.setObjectName(_fromUtf8("exportButton"))
self.horizontalLayout_2.addWidget(self.exportButton)
spacerItem1 = QtGui.QSpacerItem(13, 13, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.fit_type_label = QtGui.QLabel(self.run_config_box)
self.fit_type_label.setObjectName(_fromUtf8("fit_type_label"))
self.horizontalLayout_2.addWidget(self.fit_type_label)
self.linearFitRadio = QtGui.QRadioButton(self.run_config_box)
self.linearFitRadio.setChecked(True)
self.linearFitRadio.setObjectName(_fromUtf8("linearFitRadio"))
self.fitTypeButtonGroup = QtGui.QButtonGroup(MainWindow)
self.fitTypeButtonGroup.setObjectName(_fromUtf8("fitTypeButtonGroup"))
self.fitTypeButtonGroup.addButton(self.linearFitRadio)
self.horizontalLayout_2.addWidget(self.linearFitRadio)
self.piecewiseFitRadio = QtGui.QRadioButton(self.run_config_box)
self.piecewiseFitRadio.setObjectName(_fromUtf8("piecewiseFitRadio"))
self.fitTypeButtonGroup.addButton(self.piecewiseFitRadio)
self.horizontalLayout_2.addWidget(self.piecewiseFitRadio)
self.nPiecesSpinBox = QtGui.QSpinBox(self.run_config_box)
self.nPiecesSpinBox.setMinimumSize(QtCore.QSize(0, 20))
self.nPiecesSpinBox.setMinimum(2)
self.nPiecesSpinBox.setMaximum(10)
self.nPiecesSpinBox.setProperty("value", 2)
self.nPiecesSpinBox.setObjectName(_fromUtf8("nPiecesSpinBox"))
self.horizontalLayout_2.addWidget(self.nPiecesSpinBox)
self.cubicFitRadio = QtGui.QRadioButton(self.run_config_box)
self.cubicFitRadio.setObjectName(_fromUtf8("cubicFitRadio"))
self.fitTypeButtonGroup.addButton(self.cubicFitRadio)
self.horizontalLayout_2.addWidget(self.cubicFitRadio)
spacerItem2 = QtGui.QSpacerItem(13, 13, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.runButton = QtGui.QPushButton(self.run_config_box)
font = QtGui.QFont()
font.setPointSize(15)
self.runButton.setFont(font)
self.runButton.setObjectName(_fromUtf8("runButton"))
self.horizontalLayout_2.addWidget(self.runButton)
self.cancelButton = QtGui.QPushButton(self.run_config_box)
font = QtGui.QFont()
font.setPointSize(15)
self.cancelButton.setFont(font)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout_2.addWidget(self.cancelButton)
self.plotButton = QtGui.QPushButton(self.run_config_box)
font = QtGui.QFont()
font.setPointSize(15)
self.plotButton.setFont(font)
self.plotButton.setObjectName(_fromUtf8("plotButton"))
self.horizontalLayout_2.addWidget(self.plotButton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.nItersLineEdit = QtGui.QLineEdit(self.run_config_box)
self.nItersLineEdit.setObjectName(_fromUtf8("nItersLineEdit"))
self.horizontalLayout_3.addWidget(self.nItersLineEdit)
self.nItersLabel = QtGui.QLabel(self.run_config_box)
self.nItersLabel.setObjectName(_fromUtf8("nItersLabel"))
self.horizontalLayout_3.addWidget(self.nItersLabel)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.zeroOffsetLineEdit = QtGui.QLineEdit(self.run_config_box)
self.zeroOffsetLineEdit.setObjectName(_fromUtf8("zeroOffsetLineEdit"))
self.horizontalLayout_3.addWidget(self.zeroOffsetLineEdit)
self.zeroOffsetLabel = QtGui.QLabel(self.run_config_box)
self.zeroOffsetLabel.setObjectName(_fromUtf8("zeroOffsetLabel"))
self.horizontalLayout_3.addWidget(self.zeroOffsetLabel)
spacerItem4 = QtGui.QSpacerItem(48, 17, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem4)
self.randSeedLineEdit = QtGui.QLineEdit(self.run_config_box)
self.randSeedLineEdit.setObjectName(_fromUtf8("randSeedLineEdit"))
self.horizontalLayout_3.addWidget(self.randSeedLineEdit)
self.randSeedCheckBox = QtGui.QCheckBox(self.run_config_box)
self.randSeedCheckBox.setChecked(True)
self.randSeedCheckBox.setObjectName(_fromUtf8("randSeedCheckBox"))
self.horizontalLayout_3.addWidget(self.randSeedCheckBox)
spacerItem5 = QtGui.QSpacerItem(38, 17, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem5)
self.forceIncrCheckBox = QtGui.QCheckBox(self.run_config_box)
self.forceIncrCheckBox.setObjectName(_fromUtf8("forceIncrCheckBox"))
self.horizontalLayout_3.addWidget(self.forceIncrCheckBox)
self.slipRevCheckBox = QtGui.QCheckBox(self.run_config_box)
self.slipRevCheckBox.setObjectName(_fromUtf8("slipRevCheckBox"))
self.horizontalLayout_3.addWidget(self.slipRevCheckBox)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.verticalLayout_2.addWidget(self.run_config_box)
self.widget = QtGui.QWidget(self.centralWidget)
self.widget.setObjectName(_fromUtf8("widget"))
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.widget)
self.horizontalLayout_4.setMargin(0)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.vlayout_for_ipython = QtGui.QVBoxLayout()
self.vlayout_for_ipython.setObjectName(_fromUtf8("vlayout_for_ipython"))
self.horizontalLayout_4.addLayout(self.vlayout_for_ipython)
self.verticalLayout_2.addWidget(self.widget)
MainWindow.setCentralWidget(self.centralWidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.data_input_group_box.setTitle(_translate("MainWindow", "Data Input", None))
self.moveOffsetMarkerUpButton.setText(_translate("MainWindow", "^", None))
self.moveOffsetMarkerDownButton.setText(_translate("MainWindow", "v", None))
self.addOffsetMarkerButton.setText(_translate("MainWindow", "+", None))
self.removeOffsetMarkerButton.setText(_translate("MainWindow", "-", None))
self.run_config_box.setTitle(_translate("MainWindow", "Run Configuration", None))
self.importButton.setText(_translate("MainWindow", "Import", None))
self.exportButton.setText(_translate("MainWindow", "Export", None))
self.fit_type_label.setText(_translate("MainWindow", "Fit Type", None))
self.linearFitRadio.setText(_translate("MainWindow", "Linear", None))
self.piecewiseFitRadio.setText(_translate("MainWindow", "Piecewise Linear", None))
self.cubicFitRadio.setText(_translate("MainWindow", "Cubic Spline", None))
self.runButton.setText(_translate("MainWindow", "Run", None))
self.cancelButton.setText(_translate("MainWindow", "Cancel", None))
self.plotButton.setText(_translate("MainWindow", "Plot", None))
self.nItersLineEdit.setText(_translate("MainWindow", "1000", None))
self.nItersLabel.setText(_translate("MainWindow", "Iterations", None))
self.zeroOffsetLineEdit.setText(_translate("MainWindow", "0.", None))
self.zeroOffsetLabel.setText(_translate("MainWindow", "Zero offset age", None))
self.randSeedLineEdit.setText(_translate("MainWindow", "69.", None))
self.randSeedCheckBox.setText(_translate("MainWindow", "random seed", None))
self.forceIncrCheckBox.setText(_translate("MainWindow", "Force increasing data", None))
self.slipRevCheckBox.setText(_translate("MainWindow", "Allow slip reversals", None))
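if __name__ == "__main__":
    # Illustrative sketch: the usual preview pattern for a pyuic-generated
    # form; the real application is expected to wire Ui_MainWindow up itself.
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())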
|
py | b40dc62f9c5e1267765768b5380f49a6fe25a8cf | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CheckVerifyCodeResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'token': 'str',
'expire': 'int'
}
attribute_map = {
'token': 'token',
'expire': 'expire'
}
def __init__(self, token=None, expire=None):
"""CheckVerifyCodeResponse - a model defined in huaweicloud sdk"""
super(CheckVerifyCodeResponse, self).__init__()
self._token = None
self._expire = None
self.discriminator = None
if token is not None:
self.token = token
if expire is not None:
self.expire = expire
@property
def token(self):
"""Gets the token of this CheckVerifyCodeResponse.
Access token string
:return: The token of this CheckVerifyCodeResponse.
:rtype: str
"""
return self._token
@token.setter
def token(self, token):
"""Sets the token of this CheckVerifyCodeResponse.
Access token string
:param token: The token of this CheckVerifyCodeResponse.
:type: str
"""
self._token = token
@property
def expire(self):
"""Gets the expire of this CheckVerifyCodeResponse.
Expiration time, in seconds
:return: The expire of this CheckVerifyCodeResponse.
:rtype: int
"""
return self._expire
@expire.setter
def expire(self, expire):
"""Sets the expire of this CheckVerifyCodeResponse.
Expiration time, in seconds
:param expire: The expire of this CheckVerifyCodeResponse.
:type: int
"""
self._expire = expire
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CheckVerifyCodeResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
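if __name__ == "__main__":
    # Illustrative sketch, not part of the generated SDK: both fields are
    # optional, so an empty response can be compared with a populated one;
    # the token and expiry values below are made up.
    populated = CheckVerifyCodeResponse(token="example-token", expire=300)
    print(populated.to_str())
    print(populated == CheckVerifyCodeResponse())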
|
py | b40dc69a153ade7e5721c26a8278704d8a733bea | import os
import sys
from typing import List
from pydantic import BaseSettings
if os.getenv("ENV_FILE"):
ENV_FILE_BASE = str(os.path.splitext(os.path.basename(os.getenv("ENV_FILE")))[0])
else:
sys.exit("ERROR: 'ENV_FILE' missing.")
class UtilsSettings(BaseSettings):
"""Utils settings.
* `BASE_URL`: Base URL of your Dataverse installation without trailing slash.
* `USER_FILENAME`: relative path to user file (JSON)
* `PRODUCTION`: If `true`, the creation and removal of test-data is not activated. This is important to set to `true`, if this represents a production instance, as you don't want to create or delete data on production. To allow creation or removal on a production instance, you have to pass `--force` to the function call.
* `INSTANCE`: Name for instance. First an institution-specific string (e. g. "aussda"), second an installation-specific one (e. g. "production"). This will also be the folder name, where your test-specific data is stored in (`src/dvtests/testing/data/INSTANCE/`, `src/dvtests/testing/custom/INSTANCE/`)
* `FILENAME_DATAVERSES`: name of created dataverse JSON file from utils collect
* `FILENAME_DATASETS`: name of created datasets JSON file from utils collect
* `FILENAME_DATAFILES`: name of created datafiles JSON file from utils collect
* `FILENAME_METADATA`: name of created metadata JSON file from utils collect
* `BUILTIN_USER_KEY`: Builtin user key to create users
"""
BASE_URL: str
USER_FILENAME: str
PRODUCTION: bool = False
INSTANCE: str = ENV_FILE_BASE
FILENAME_DATAVERSES: str = "dataverses.json"
FILENAME_DATASETS: str = "datasets.json"
FILENAME_DATAFILES: str = "datafiles.json"
FILENAME_METADATA: str = "metadata.json"
BUILTIN_USER_KEY: str = None
class TestSettings(BaseSettings):
"""Settings for Testing.
* `BASE_URL`: Base URL of your Dataverse installation without trailing slash.
* `USER_FILENAME`: relative path to user file (JSON)
* `VERSION`: Dataverse version string. e. g. `4.20`
* `INSTANCE`: Name for instance. First an institution-specific string (e. g. "aussda"), second an installation-specific one (e. g. "production"). This will also be the folder name, where your test-specific data is stored in (`src/dvtests/testing/data/INSTANCE/`, `src/dvtests/testing/custom/INSTANCE/`)
* `HEADLESS`: Executes Selenium tests with or without browser window opening (default = `true` -> without browser window).
* `USER_AGENT`: Passed user agent for requests and selenium requests (e. g. `SELENIUM-TEST`). This allows to exclude tracking by your web-analytics tool (e. g. Matomo, Google Analytics) of requests done by Dataverse tests. For this, you have to tell your web-analytics tool to exclude all visits with the defined user-agent.
* `WINDOW_HEIGHT`: Window height for Selenium
* `WINDOW_WIDTH`: Window width for Selenium
* `MAX_WAIT_TIME`: max wait time for selenium waits
* `LOGIN_OPTIONS`: List of user login options (options: `normal`, `shibboleth`)
* `FILENAME_DATAVERSES`: name of created dataverse JSON file from utils collect
* `FILENAME_DATASETS`: name of created datasets JSON file from utils collect
* `FILENAME_DATAFILES`: name of created datafiles JSON file from utils collect
* `FILENAME_METADATA`: name of created metadata JSON file from utils collect
* `SHIBBOLETH_INSTITUTION`: name of Shibboleth institution for login purpose
* `SHIBBOLETH_LOGIN_PAGE_TITLE`: title of Shibboleth Login page
* `BUILTIN_USER_KEY`: Builtin user key to create users
* `DATA_COLLECTOR`: descriptor for data collector. It is ether 1. the user handle of dataverse user, which collected the data or 2. "public" for publicly accessible data
"""
BASE_URL: str
USER_FILENAME: str
VERSION: str
INSTANCE: str = ENV_FILE_BASE
HEADLESS: bool = True
USER_AGENT: str = "TESTING"
WINDOW_HEIGHT: int = 1400
WINDOW_WIDTH: int = 1600
MAX_WAIT_TIME: int = 20
LOGIN_OPTIONS: List[str] = ["normal"]
FILENAME_DATAVERSES: str = "dataverses.json"
FILENAME_DATASETS: str = "datasets.json"
FILENAME_DATAFILES: str = "datafiles.json"
FILENAME_METADATA: str = "metadata.json"
SHIBBOLETH_INSTITUTION: str = None
SHIBBOLETH_LOGIN_PAGE_TITLE: str = None
BUILTIN_USER_KEY: str = None
DATA_COLLECTOR: str = None
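if __name__ == "__main__":
    # Illustrative sketch: both settings classes are normally instantiated
    # from the dotenv file named in ENV_FILE; pydantic's `_env_file` keyword
    # is used here, and the printed fields are just examples.
    example_settings = UtilsSettings(_env_file=os.getenv("ENV_FILE"))
    print(example_settings.BASE_URL, example_settings.INSTANCE)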
|
py | b40dc7809fb38ae257f48256cc5a99bdc7cac190 | from setuptools import setup
from linx import __version__
setup(
name="linxpy",
version=__version__,
py_modules=["linx"],
entry_points={
"console_scripts": [
"linx = linx:linx",
"unlinx = linx:unlinx",
],
},
install_requires=["requests>=2.4"],
description="A Python client for linx.li.",
license="WTFPL",
author="mutantmonkey",
author_email="[email protected]",
keywords="linx linxpy upload",
url="https://github.com/mutantmonkey/linxpy"
)
|
py | b40dc82f9556a1db715685b1b3e8e564a2e1ac15 | import datetime
try:
from dateutil.tz import tzutc, tzlocal
except ImportError: # pragma: no cover
raise ImportError(
'Using the datetime fields requires the dateutil library. '
'You can obtain dateutil from http://labix.org/python-dateutil'
)
from .base import DateTimeType
EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=tzutc())
class TimeStampType(DateTimeType):
"""Variant of a datetime field that saves itself as a unix timestamp (int)
instead of a ISO-8601 string.
"""
@classmethod
def timestamp_to_date(cls, value):
return datetime.datetime.fromtimestamp(value, tz=tzutc())
@classmethod
def date_to_timestamp(cls, value):
if value.tzinfo is None:
value = value.replace(tzinfo=tzlocal())
delta = value - EPOCH
return (delta.days * 24 * 3600) + delta.seconds + delta.microseconds / 1e6  # sub-second part expressed in seconds, not raw microseconds
def to_primitive(self, value, context=None):
return TimeStampType.date_to_timestamp(value)
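# Illustrative sketch (assumed usage): the two helpers are inverses of each
# other for timezone-aware datetimes, e.g.
#
#   dt = datetime.datetime(2020, 1, 1, 12, 0, 0, tzinfo=tzutc())
#   stamp = TimeStampType.date_to_timestamp(dt)
#   assert TimeStampType.timestamp_to_date(stamp) == dt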
|
py | b40dc8ade98354ef901c763c2f73e2eeafa7a31d | #!/bin/python2
# Copyright (c) 2019 ZettaDB inc. All rights reserved.
# This source code is licensed under Apache 2.0 License,
# combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
import sys
import json
import getpass
import re
import time
import uuid
import os
import os.path
import argparse
def addIpToMachineMap(map, ip, args):
if not map.has_key(ip):
mac={"ip":ip, "user":args.defuser, "basedir":args.defbase}
map[ip] = mac
def addMachineToMap(map, ip, user, basedir):
mac={"ip":ip, "user":user, "basedir":basedir}
map[ip] = mac
def addIpToFilesMap(map, ip, fname, targetdir):
if not map.has_key(ip):
map[ip] = {}
tmap = map[ip]
if not tmap.has_key(fname):
tmap[fname] = targetdir
def addNodeToFilesMap(map, node, fname, targetdir):
ip = node['ip']
addIpToFilesMap(map, ip, fname, targetdir)
def addNodeToIpset(set, node):
ip = node['ip']
set.add(ip)
# Not used currently.
def addToCommandsMap(map, ip, targetdir, command):
if not map.has_key(ip):
map[ip] = []
cmds = map[ip]
cmds.append([targetdir, command])
def addToCommandsList(cmds, ip, targetdir, command, envtype="no"):
lst = [ip, targetdir, command, envtype]
cmds.append(lst)
def addToDirMap(map, ip, newdir):
if not map.has_key(ip):
map[ip] = []
dirs = map[ip]
dirs.append(newdir)
def getuuid():
return str(uuid.uuid1())
def addPortToMachine(map, ip, port):
if not map.has_key(ip):
map[ip] = set([port])
else:
pset = map[ip]
if port in pset:
raise ValueError("duplicate port:%s on host:%s" % (str(port), ip))
else:
pset.add(port)
def addDirToMachine(map, ip, directory):
if not map.has_key(ip):
map[ip] = set([directory])
else:
dset = map[ip]
if directory in dset:
raise ValueError("duplicate directory:%s on host:%s" % (directory, ip))
else:
dset.add(directory)
def validate_config(jscfg):
cluster = jscfg['cluster']
meta = cluster['meta']
comps = cluster['comp']['nodes']
datas = cluster['data']
portmap = {}
dirmap = {}
metacnt = len(meta['nodes'])
if metacnt == 0:
raise ValueError('Error: There must be at least one node in meta shard')
hasPrimary=False
for node in meta['nodes']:
addPortToMachine(portmap, node['ip'], node['port'])
addPortToMachine(portmap, node['ip'], node['xport'])
addPortToMachine(portmap, node['ip'], node['mgr_port'])
addDirToMachine(dirmap, node['ip'], node['data_dir_path'])
addDirToMachine(dirmap, node['ip'], node['log_dir_path'])
if node.has_key('innodb_log_dir_path'):
addDirToMachine(dirmap, node['ip'], node['innodb_log_dir_path'])
if node.get('is_primary', False):
if hasPrimary:
raise ValueError('Error: Two primaries found in meta shard, there should be one and only one Primary specified !')
else:
hasPrimary = True
if metacnt > 1:
if not hasPrimary:
raise ValueError('Error: No primary found in meta shard, there should be one and only one primary specified !')
else:
node['is_primary'] = True
for node in comps:
addPortToMachine(portmap, node['ip'], node['port'])
addDirToMachine(dirmap, node['ip'], node['datadir'])
i=1
for shard in datas:
nodecnt = len(shard['nodes'])
if nodecnt == 0:
raise ValueError('Error: There must be at least one node in data shard%d' % i)
if nodecnt > 1 and metacnt == 1:
raise ValueError('Error: Meta shard has only one node, but data shard%d has two or more' % i)
elif nodecnt == 1 and metacnt > 1:
raise ValueError('Error: Meta shard has two or more node, but data shard%d has only one' % i)
hasPrimary=False
for node in shard['nodes']:
addPortToMachine(portmap, node['ip'], node['port'])
addPortToMachine(portmap, node['ip'], node['xport'])
addPortToMachine(portmap, node['ip'], node['mgr_port'])
addDirToMachine(dirmap, node['ip'], node['data_dir_path'])
addDirToMachine(dirmap, node['ip'], node['log_dir_path'])
if node.has_key('innodb_log_dir_path'):
addDirToMachine(dirmap, node['ip'], node['innodb_log_dir_path'])
if node.get('is_primary', False):
if hasPrimary:
raise ValueError('Error: Two primaries found in shard%d, there should be one and only one Primary specified !' % i)
else:
hasPrimary = True
if metacnt > 1:
if not hasPrimary:
raise ValueError('Error: No primary found in shard%d, there should be one and only one primary specified !' % i)
else:
node['is_primary'] = True
i+=1
def generate_haproxy_config(jscfg, machines, confname):
cluster = jscfg['cluster']
comps = cluster['comp']['nodes']
haproxy = cluster['haproxy']
mach = machines[haproxy['ip']]
maxconn = haproxy.get('maxconn', 10000)
conf = open(confname, 'w')
conf.write('''# generated automatically
global
pidfile %s/haproxy.pid
maxconn %d
daemon
defaults
log global
retries 5
timeout connect 5s
timeout client 30000s
timeout server 30000s
listen kunlun-cluster
bind :%d
mode tcp
balance roundrobin
''' % (mach['basedir'], maxconn, haproxy['port']))
i = 1
for node in comps:
conf.write(" server comp%d %s:%d weight 1 check inter 10s\n" % (i, node['ip'], node['port']))
i += 1
conf.close()
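# Illustrative sketch (made-up addresses): generate_haproxy_config only needs
# the comp nodes, the haproxy entry and the basedir of the haproxy machine.
#
#   example_cfg = {'cluster': {
#       'comp': {'nodes': [{'ip': '192.168.0.11', 'port': 5432}]},
#       'haproxy': {'ip': '192.168.0.10', 'port': 6001}}}
#   example_machines = {'192.168.0.10': {'ip': '192.168.0.10',
#                                        'user': 'kunlun', 'basedir': '/home/kunlun'}}
#   generate_haproxy_config(example_cfg, example_machines, 'haproxy-example.cfg')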
def generate_install_scripts(jscfg, args):
validate_config(jscfg)
os.system('mkdir -p install')  # ensure the output directory exists before the config files below are written
installtype = args.installtype
sudopfx=""
if args.sudo:
sudopfx="sudo "
localip = '127.0.0.1'
machines = {}
for mach in jscfg['machines']:
ip=mach['ip']
user=mach.get('user', args.defuser)
base=mach.get('basedir', args.defbase)
addMachineToMap(machines, ip, user, base)
storagedir = "kunlun-storage-%s" % args.product_version
serverdir = "kunlun-server-%s" % args.product_version
clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
filesmap = {}
commandslist = []
dirmap = {}
usemgr=True
cluster = jscfg['cluster']
cluster_name = cluster['name']
meta = cluster['meta']
usemgr=False
metacnt = len(meta['nodes'])
if metacnt > 1:
usemgr = True
if not meta.has_key('group_uuid'):
meta['group_uuid'] = getuuid()
my_metaname = 'mysql_meta.json'
metaf = open(r'install/%s' % my_metaname,'w')
json.dump(meta, metaf, indent=4)
metaf.close()
cmdpat = '%spython2 install-mysql.py --config=./%s --target_node_index=%d --cluster_id=%s --shard_id=%s'
# commands like:
# python2 install-mysql.py --config=./mysql_meta.json --target_node_index=0
targetdir='%s/dba_tools' % storagedir
i=0
pries = []
secs = []
shard_id = "meta"
for node in meta['nodes']:
addNodeToFilesMap(filesmap, node, my_metaname, targetdir)
addIpToMachineMap(machines, node['ip'], args)
cmd = cmdpat % (sudopfx, my_metaname, i, cluster_name, shard_id)
if node.get('is_primary', False):
pries.append([node['ip'], targetdir, cmd])
else:
secs.append([node['ip'], targetdir, cmd])
addToDirMap(dirmap, node['ip'], node['data_dir_path'])
addToDirMap(dirmap, node['ip'], node['log_dir_path'])
if node.has_key('innodb_log_dir_path'):
addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
i+=1
datas = cluster['data']
i=1
for shard in datas:
if not shard.has_key('group_uuid'):
shard['group_uuid'] = getuuid()
shard_id = "shard%d" % i
my_shardname = "mysql_shard%d.json" % i
shardf = open(r'install/%s' % my_shardname, 'w')
json.dump(shard, shardf, indent=4)
shardf.close()
j = 0
for node in shard['nodes']:
addNodeToFilesMap(filesmap, node, my_shardname, targetdir)
addIpToMachineMap(machines, node['ip'], args)
cmd = cmdpat % (sudopfx, my_shardname, j, cluster_name, shard_id)
if node.get('is_primary', False):
pries.append([node['ip'], targetdir, cmd])
else:
secs.append([node['ip'], targetdir, cmd])
addToDirMap(dirmap, node['ip'], node['data_dir_path'])
addToDirMap(dirmap, node['ip'], node['log_dir_path'])
if node.has_key('innodb_log_dir_path'):
addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
j += 1
i+=1
extraopt = " "
if not usemgr:
extraopt = " --ha_mode=no_rep"
for item in pries:
addToCommandsList(commandslist, item[0], item[1], item[2] + extraopt)
for item in secs:
addToCommandsList(commandslist, item[0], item[1], item[2] + extraopt)
# This only needs to transfered to machine creating the cluster.
pg_metaname = 'postgres_meta.json'
metaf = open(r'install/%s' % pg_metaname, 'w')
objs = []
for node in meta['nodes']:
obj = {}
obj['ip'] = node['ip']
obj['port'] = node['port']
obj['user'] = "pgx"
obj['password'] = "pgx_pwd"
objs.append(obj)
json.dump(objs, metaf, indent=4)
metaf.close()
# This only needs to transfered to machine creating the cluster.
pg_shardname = 'postgres_shards.json'
shardf = open(r'install/%s' % pg_shardname, 'w')
shards = []
i=1
for shard in datas:
obj={'shard_name': "shard%d" % i}
i+=1
nodes=[]
for node in shard['nodes']:
n={'user':'pgx', 'password':'pgx_pwd'}
n['ip'] = node['ip']
n['port'] = node['port']
nodes.append(n)
obj['shard_nodes'] = nodes
shards.append(obj)
json.dump(shards, shardf, indent=4)
shardf.close()
comps = cluster['comp']['nodes']
pg_compname = 'postgres_comp.json'
compf = open(r'install/%s' % pg_compname, 'w')
json.dump(comps, compf, indent=4)
compf.close()
# python2 install_pg.py --config=docker-comp.json --install_ids=1,2,3
targetdir="%s/scripts" % serverdir
for node in comps:
addNodeToFilesMap(filesmap, node, pg_compname, targetdir)
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'python2 install_pg.py --config=./%s --install_ids=%d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (pg_compname, node['id']))
addToDirMap(dirmap, node['ip'], node['datadir'])
comp1 = comps[0]
addNodeToFilesMap(filesmap, comp1, pg_metaname, targetdir)
addNodeToFilesMap(filesmap, comp1, pg_shardname, targetdir)
resourcedir = "%s/resources" % serverdir
cmdpat=r'/bin/bash build_driver.sh'
addToCommandsList(commandslist, comp1['ip'], resourcedir, cmdpat, "all")
cmdpat=r'python2 bootstrap.py --config=./%s --bootstrap_sql=./meta_inuse.sql'
addToCommandsList(commandslist, comp1['ip'], targetdir, cmdpat % pg_metaname, "storage")
cmdpat='python2 create_cluster.py --shards_config=./%s \
--comps_config=./%s --meta_config=./%s --cluster_name=%s --cluster_owner=abc --cluster_biz=test'
if not usemgr:
cmdpat = cmdpat + " --ha_mode=no_rep"
addToCommandsList(commandslist, comp1['ip'], targetdir,
cmdpat % (pg_shardname, pg_compname, pg_metaname, cluster_name), "all")
# bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid clustermgr.cnf >& run.log </dev/null &
mgr_name = 'clustermgr.cnf'
os.system('mkdir -p install')
mgrf = open(r'install/%s' % mgr_name, 'w')
mgrtempf = open(r'clustermgr.cnf.template','r')
firstmeta = meta['nodes'][0]
for line in mgrtempf:
newline = re.sub('META_HOST', firstmeta['ip'], line)
newline = re.sub('META_PORT', str(firstmeta['port']), newline)
mgrf.write(newline)
mgrtempf.close()
mgrf.close()
targetdir=clustermgrdir
addIpToMachineMap(machines, cluster['clustermgr']['ip'], args)
addIpToFilesMap(filesmap, cluster['clustermgr']['ip'], mgr_name, targetdir)
cmdpat = r'bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid %s >& run.log </dev/null &'
addToCommandsList(commandslist, cluster['clustermgr']['ip'], targetdir, cmdpat % mgr_name, "clustermgr")
haproxy = cluster.get("haproxy", None)
if haproxy is not None:
addIpToMachineMap(machines, haproxy['ip'], args)
generate_haproxy_config(jscfg, machines, 'install/haproxy.cfg')
cmdpat = r'haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg >& haproxy.log'
addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
com_name = 'commands.sh'
comf = open(r'install/%s' % com_name, 'w')
comf.write('#! /bin/bash\n')
# files copy.
for ip in machines:
mach = machines.get(ip)
if args.sudo:
mkstr = "bash remote_run.sh --user=%s %s 'sudo mkdir -p %s && sudo chown -R %s:`id -gn %s` %s'\n"
tup= (mach['user'], ip, mach['basedir'], mach['user'], mach['user'], mach['basedir'])
else:
mkstr = "bash remote_run.sh --user=%s %s 'mkdir -p %s'\n"
tup= (mach['user'], ip, mach['basedir'])
comf.write(mkstr % tup)
# Set up the files
if installtype == 'full':
comstr = "bash dist.sh --hosts=%s --user=%s %s %s\n"
comf.write(comstr % (ip, mach['user'], '%s.tgz' % storagedir, mach['basedir']))
comf.write(comstr % (ip, mach['user'], '%s.tgz' % serverdir, mach['basedir']))
comf.write(comstr % (ip, mach['user'], '%s.tgz' % clustermgrdir, mach['basedir']))
if cluster.has_key('haproxy'):
comf.write(comstr % (ip, mach['user'], 'haproxy-2.5.0-bin.tar.gz', mach['basedir']))
extstr = "bash remote_run.sh --user=%s %s 'cd %s && tar -xzf %s'\n"
comf.write(extstr % (mach['user'], ip, mach['basedir'], '%s.tgz' % storagedir))
comf.write(extstr % (mach['user'], ip, mach['basedir'], '%s.tgz' % serverdir))
comf.write(extstr % (mach['user'], ip, mach['basedir'], '%s.tgz' % clustermgrdir))
if cluster.has_key('haproxy'):
comf.write(extstr % (mach['user'], ip, mach['basedir'], 'haproxy-2.5.0-bin.tar.gz'))
# files
fmap = {'build_driver.sh': '%s/resources' % serverdir, 'process_deps.sh': '.'}
if cluster.has_key('haproxy'):
fmap['haproxy.cfg'] = '.'
for fname in fmap:
comstr = "bash dist.sh --hosts=%s --user=%s install/%s %s/%s\n"
tup=(ip, mach['user'], fname, mach['basedir'], fmap[fname])
comf.write(comstr % tup)
comstr = "bash remote_run.sh --user=%s %s 'cd %s/%s || exit 1; test -d etc && echo > etc/instances_list.txt 2>/dev/null; exit 0'\n"
comf.write(comstr % (mach['user'], ip, mach['basedir'], serverdir))
comstr = "bash remote_run.sh --user=%s %s 'cd %s/%s || exit 1; test -d etc && echo > etc/instances_list.txt 2>/dev/null; exit 0'\n"
comf.write(comstr % (mach['user'], ip, mach['basedir'], storagedir))
# Set up the env.sh
comstr = "bash dist.sh --hosts=%s --user=%s env.sh.template %s\n"
extstr = ''' bash remote_run.sh --user=%s %s "cd %s && sed -s 's#KUNLUN_BASEDIR#%s#g' env.sh.template > env.sh" '''
tup=(ip, mach['user'], mach['basedir'])
exttup=(mach['user'], ip, mach['basedir'], mach['basedir'])
comf.write(comstr % tup)
comf.write(extstr % exttup)
comf.write("\n")
extstr = ''' bash remote_run.sh --user=%s %s "cd %s && sed -i 's#KUNLUN_VERSION#%s#g' env.sh" '''
exttup=(mach['user'], ip, mach['basedir'], args.product_version)
comf.write(extstr % exttup)
comf.write("\n")
comstr = "bash remote_run.sh --user=%s %s 'cd %s && envtype=storage && source ./env.sh && cd %s/lib && bash ../../process_deps.sh'\n"
comf.write(comstr % (mach['user'], ip, mach['basedir'], storagedir))
comstr = "bash remote_run.sh --user=%s %s 'cd %s && envtype=computing && source ./env.sh && cd %s/lib && bash ../../process_deps.sh'\n"
comf.write(comstr % (mach['user'], ip, mach['basedir'], serverdir))
comstr = "bash remote_run.sh --user=%s %s 'cd %s && envtype=clustermgr && source ./env.sh && cd %s/lib && bash ../../process_deps.sh'\n"
comf.write(comstr % (mach['user'], ip, mach['basedir'], clustermgrdir))
# dir making
for ip in dirmap:
mach = machines.get(ip)
dirs=dirmap[ip]
for d in dirs:
if args.sudo:
mkstr = "bash remote_run.sh --user=%s %s 'sudo mkdir -p %s && sudo chown -R %s:`id -gn %s` %s'\n"
tup= (mach['user'], ip, d, mach['user'], mach['user'], d)
else:
mkstr = "bash remote_run.sh --user=%s %s 'mkdir -p %s'\n"
tup= (mach['user'], ip, d)
comf.write(mkstr % tup)
# files copy.
for ip in filesmap:
mach = machines.get(ip)
# files
fmap = filesmap[ip]
for fname in fmap:
comstr = "bash dist.sh --hosts=%s --user=%s install/%s %s/%s\n"
tup=(ip, mach['user'], fname, mach['basedir'], fmap[fname])
comf.write(comstr % tup)
    # A commands map is not used here because the order of
    # the commands must be preserved.
for cmd in commandslist:
ip=cmd[0]
mach = machines[ip]
mkstr = "bash remote_run.sh --user=%s %s $'cd %s && envtype=%s && source ./env.sh && cd %s || exit 1; %s'\n"
tup= (mach['user'], ip, mach['basedir'], cmd[3], cmd[1], cmd[2])
comf.write(mkstr % tup)
comf.close()
# The order is meta shard -> data shards -> cluster_mgr -> comp nodes
def generate_start_scripts(jscfg, args):
sudopfx=""
if args.sudo:
sudopfx="sudo "
localip = '127.0.0.1'
machines = {}
for mach in jscfg['machines']:
ip=mach['ip']
user=mach.get('user', args.defuser)
base=mach.get('basedir', args.defbase)
addMachineToMap(machines, ip, user, base)
storagedir = "kunlun-storage-%s" % args.product_version
serverdir = "kunlun-server-%s" % args.product_version
clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
filesmap = {}
commandslist = []
cluster = jscfg['cluster']
meta = cluster['meta']
# commands like:
# bash startmysql.sh [port]
targetdir='%s/dba_tools' % storagedir
for node in meta['nodes']:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'%sbash startmysql.sh %s'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['port']))
# bash startmysql.sh [port]
targetdir='%s/dba_tools' % storagedir
datas = cluster['data']
for shard in datas:
for node in shard['nodes']:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'%sbash startmysql.sh %s'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['port']))
# bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid clustermgr.cnf >& run.log </dev/null &
addIpToMachineMap(machines, cluster['clustermgr']['ip'], args)
mgr_name = 'clustermgr.cnf'
targetdir=clustermgrdir
cmdpat = r'bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid %s >& run.log </dev/null &'
addToCommandsList(commandslist, cluster['clustermgr']['ip'], targetdir, cmdpat % mgr_name, "clustermgr")
# su postgres -c "python2 start_pg.py port=5401"
comps = cluster['comp']['nodes']
targetdir="%s/scripts" % serverdir
for node in comps:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'python2 start_pg.py port=%d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "computing")
haproxy = cluster.get("haproxy", None)
if haproxy is not None:
addIpToMachineMap(machines, haproxy['ip'], args)
cmdpat = r'haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg >& haproxy.log'
addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
com_name = 'commands.sh'
os.system('mkdir -p start')
comf = open(r'start/%s' % com_name, 'w')
comf.write('#! /bin/bash\n')
for cmd in commandslist:
ip=cmd[0]
mach = machines[ip]
mkstr = "bash remote_run.sh --user=%s %s $'cd %s && envtype=%s && source ./env.sh && cd %s || exit 1; %s'\n"
tup= (mach['user'], ip, mach['basedir'], cmd[3], cmd[1], cmd[2])
comf.write(mkstr % tup)
comf.close()
# The order is: comp-nodes -> cluster_mgr -> data shards -> meta shard
def generate_stop_scripts(jscfg, args):
localip = '127.0.0.1'
machines = {}
for mach in jscfg['machines']:
ip=mach['ip']
user=mach.get('user', args.defuser)
base=mach.get('basedir', args.defbase)
addMachineToMap(machines, ip, user, base)
storagedir = "kunlun-storage-%s" % args.product_version
serverdir = "kunlun-server-%s" % args.product_version
clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
commandslist = []
cluster = jscfg['cluster']
haproxy = cluster.get("haproxy", None)
if haproxy is not None:
addIpToMachineMap(machines, haproxy['ip'], args)
cmdpat="cat haproxy.pid | xargs kill -9"
addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
# pg_ctl -D %s stop"
comps = cluster['comp']['nodes']
targetdir="%s/scripts" % serverdir
for node in comps:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'pg_ctl -D %s stop -m immediate'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['datadir'], "computing")
# bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid --stop
addIpToMachineMap(machines, cluster['clustermgr']['ip'], args)
targetdir=clustermgrdir
cmdpat = r"bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid --stop"
addToCommandsList(commandslist, cluster['clustermgr']['ip'], targetdir, cmdpat, "clustermgr")
# bash stopmysql.sh [port]
targetdir='%s/dba_tools' % storagedir
datas = cluster['data']
for shard in datas:
for node in shard['nodes']:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'bash stopmysql.sh %d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
meta = cluster['meta']
# commands like:
# mysqladmin --defaults-file=/kunlun/kunlun-storage-$version/etc/my_6001.cnf -uroot -proot shutdown
targetdir='%s/dba_tools' % storagedir
for node in meta['nodes']:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'bash stopmysql.sh %d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
com_name = 'commands.sh'
os.system('mkdir -p stop')
comf = open(r'stop/%s' % com_name, 'w')
comf.write('#! /bin/bash\n')
for cmd in commandslist:
ip=cmd[0]
mach = machines[ip]
mkstr = "bash remote_run.sh --user=%s %s $'cd %s && envtype=%s && source ./env.sh && cd %s || exit 1; %s'\n"
tup= (mach['user'], ip, mach['basedir'], cmd[3], cmd[1], cmd[2])
comf.write(mkstr % tup)
comf.close()
# The order is: comp-nodes -> cluster_mgr -> data shards -> meta shard
def generate_clean_scripts(jscfg, args):
sudopfx=""
if args.sudo:
sudopfx="sudo "
cleantype = args.cleantype
localip = '127.0.0.1'
machines = {}
for mach in jscfg['machines']:
ip=mach['ip']
user=mach.get('user', args.defuser)
base=mach.get('basedir', args.defbase)
addMachineToMap(machines, ip, user, base)
storagedir = "kunlun-storage-%s" % args.product_version
serverdir = "kunlun-server-%s" % args.product_version
clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
commandslist = []
cluster = jscfg['cluster']
haproxy = cluster.get("haproxy", None)
if haproxy is not None:
addIpToMachineMap(machines, haproxy['ip'], args)
cmdpat="cat haproxy.pid | xargs kill -9"
addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
cmdpat="rm -f haproxy.pid"
addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
# pg_ctl -D %s stop"
comps = cluster['comp']['nodes']
targetdir="%s/scripts" % serverdir
for node in comps:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'pg_ctl -D %s stop -m immediate'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['datadir'], "computing")
cmdpat = r'%srm -fr %s'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['datadir']))
# bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid --stop
addIpToMachineMap(machines, cluster['clustermgr']['ip'], args)
targetdir=clustermgrdir
cmdpat = r"bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid --stop"
addToCommandsList(commandslist, cluster['clustermgr']['ip'], targetdir, cmdpat, "clustermgr")
# bash stopmysql.sh [port]
targetdir='%s/dba_tools' % storagedir
datas = cluster['data']
for shard in datas:
for node in shard['nodes']:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'bash stopmysql.sh %d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
cmdpat = r'%srm -fr %s'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['log_dir_path']))
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['data_dir_path']))
if node.has_key('innodb_log_dir_path'):
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['innodb_log_dir_path']))
meta = cluster['meta']
# commands like:
# mysqladmin --defaults-file=/kunlun/kunlun-storage-$version/etc/my_6001.cnf -uroot -proot shutdown
targetdir='%s/dba_tools' % storagedir
for node in meta['nodes']:
addIpToMachineMap(machines, node['ip'], args)
cmdpat = r'bash stopmysql.sh %d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
cmdpat = r'%srm -fr %s'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['log_dir_path']))
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['data_dir_path']))
if node.has_key('innodb_log_dir_path'):
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['innodb_log_dir_path']))
if cleantype == 'full':
for ip in machines:
mach =machines[ip]
cmdpat = '%srm -fr %s/*'
addToCommandsList(commandslist, ip, "/", cmdpat % (sudopfx, mach['basedir']))
com_name = 'commands.sh'
os.system('mkdir -p clean')
comf = open(r'clean/%s' % com_name, 'w')
comf.write('#! /bin/bash\n')
for cmd in commandslist:
ip=cmd[0]
mach = machines[ip]
mkstr = "bash remote_run.sh --user=%s %s $'cd %s && envtype=%s && source ./env.sh && cd %s || exit 1; %s'\n"
tup= (mach['user'], ip, mach['basedir'], cmd[3], cmd[1], cmd[2])
comf.write(mkstr % tup)
comf.close()
def checkdirs(dirs):
for d in dirs:
if not os.path.exists(d):
os.mkdir(d)
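# Illustrative invocations (the script file name below is an assumption, not
# taken from this file; the flags are the argparse options defined under the
# __main__ block that follows):
#   python2 generate_scripts.py --action=install --config=./cluster.json --sudo
#   python2 generate_scripts.py --action=start --config=./cluster.json
#   python2 generate_scripts.py --action=clean --config=./cluster.json --cleantype=full
# Each action writes a commands.sh into install/, start/, stop/ or clean/,
# which is meant to be run from the control machine to drive the remote nodes.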
if __name__ == '__main__':
actions=["install", "start", "stop", "clean"]
parser = argparse.ArgumentParser(description='Specify the arguments.')
parser.add_argument('--action', type=str, help="The action", required=True, choices=actions)
parser.add_argument('--config', type=str, help="The config path", required=True)
parser.add_argument('--defuser', type=str, help="the command", default=getpass.getuser())
parser.add_argument('--defbase', type=str, help="the command", default='/kunlun')
parser.add_argument('--installtype', type=str, help="the install type", default='full', choices=['full', 'cluster'])
parser.add_argument('--cleantype', type=str, help="the clean type", default='full', choices=['full', 'cluster'])
parser.add_argument('--sudo', help="whether to use sudo", default=False, action='store_true')
parser.add_argument('--product_version', type=str, help="kunlun version", default='0.9.1')
args = parser.parse_args()
checkdirs(actions)
print str(sys.argv)
jsconf = open(args.config)
jstr = jsconf.read()
jscfg = json.loads(jstr)
jsconf.close()
# print str(jscfg)
if args.action == 'install':
generate_install_scripts(jscfg, args)
elif args.action == 'start':
generate_start_scripts(jscfg, args)
elif args.action == 'stop':
generate_stop_scripts(jscfg, args)
elif args.action == 'clean':
generate_clean_scripts(jscfg, args)
else :
usage()
sys.exit(1)
|
py | b40dc91fc4d8f645560e83ad0293a8f9106e0bff | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
|
py | b40dc99feb1fccb6c26efe0e7082c7b3b06660f3 | # encoding: utf-8
from decimal import Decimal
from datetime import datetime
from psi.app import const
from psi.app.models.data_security_mixin import DataSecurityMixin
from psi.app.service import Info
from psi.app.utils.format_util import format_decimal
from sqlalchemy import Column, Integer, ForeignKey, Numeric, Text, DateTime, select, func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref, relationship
db = Info.get_db()
class Shipping(db.Model, DataSecurityMixin):
__tablename__ = 'shipping'
id = Column(Integer, primary_key=True)
date = Column(DateTime, nullable=False)
remark = Column(Text)
status_id = Column(Integer, ForeignKey('enum_values.id'), nullable=False)
status = relationship('EnumValues', foreign_keys=[status_id])
type_id = Column(Integer, ForeignKey('enum_values.id'), nullable=False)
type = relationship('EnumValues', foreign_keys=[type_id])
sales_order_id = Column(Integer, ForeignKey('sales_order.id'), nullable=False)
sales_order = relationship('SalesOrder', backref=backref('so_shipping', uselist=False))
inventory_transaction_id = Column(Integer, ForeignKey('inventory_transaction.id'), nullable=True)
inventory_transaction = relationship('InventoryTransaction',
backref=backref('it_shipping', uselist=False, ))
organization_id = db.Column(Integer, ForeignKey('organization.id'))
organization = relationship('Organization', foreign_keys=[organization_id])
@staticmethod
def status_filter():
from psi.app.models.enum_values import EnumValues
return EnumValues.type_filter(const.SHIPPING_STATUS_KEY)
@hybrid_property
def total_amount(self):
return format_decimal(Decimal(sum(line.total_amount for line in self.lines)))
@total_amount.expression
def total_amount(self):
return (select([func.sum(ShippingLine.price * ShippingLine.quantity)])
.where(self.id == ShippingLine.shipping_id).label('total_amount'))
@total_amount.setter
def total_amount(self, value):
pass
@staticmethod
def filter_by_so_id(so_id):
return Info.get_db().session.query(Shipping).filter_by(sales_order_id=so_id).all()
def __unicode__(self):
return str(self.id) + ' - ' + str(self.total_amount)
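    # Mirror this shipping into an outbound InventoryTransaction: a sales-out
    # (direct shipping) or franchise-sales-out transaction is created or reused,
    # with one negative-quantity transaction line per shipping line.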
def create_or_update_inventory_transaction(self):
from psi.app.models.inventory_transaction import InventoryTransactionLine, InventoryTransaction
from psi.app.models.enum_values import EnumValues
if self.type.code == const.DIRECT_SHIPPING_TYPE_KEY:
it_type = EnumValues.get(const.SALES_OUT_INV_TRANS_TYPE_KEY)
else:
it_type = EnumValues.get(const.FRANCHISE_SALES_OUT_INV_TRANS_TYPE_KEY)
it = self.inventory_transaction
if it is None:
it = InventoryTransaction()
it.type = it_type
self.inventory_transaction = it
it.date = self.date
it.organization = self.organization
for line in self.lines:
itl = line.inventory_transaction_line
if itl is None:
itl = InventoryTransactionLine()
itl.quantity = -line.quantity
itl.product = line.product
itl.price = line.price
itl.in_transit_quantity = 0
itl.inventory_transaction = it
line.inventory_transaction_line = itl
self.update_saleable_qty_in_purchase_inv_lines(line)
Info.get_db().session.add(it)
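    # Consume saleable quantity from earlier receiving (purchase) inventory
    # lines in id order and record an InventoryInOutLink per consumed line,
    # so the in-price and out-price of the shipped goods can be traced.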
def update_saleable_qty_in_purchase_inv_lines(self, ship_line):
from psi.app.models import InventoryTransactionLine, InventoryInOutLink
avail_inv_trans = Info.get_db().session.query(InventoryTransactionLine) \
.filter(InventoryTransactionLine.saleable_quantity > 0,
InventoryTransactionLine.product_id == ship_line.product.id) \
.order_by(InventoryTransactionLine.id).all()
to_update_purchase_inventory_line, inventory_in_out_links = [],[]
for recv_iv_trans in avail_inv_trans:
remain_qty = ship_line.quantity
if recv_iv_trans.saleable_quantity >= ship_line.quantity:
recv_iv_trans.saleable_quantity = recv_iv_trans.saleable_quantity \
- ship_line.quantity
remain_qty = 0
else:
                remain_qty = ship_line.quantity - recv_iv_trans.saleable_quantity
                recv_iv_trans.saleable_quantity = 0
link = InventoryInOutLink()
link.date = datetime.now()
link.product = ship_line.product
link.in_price = recv_iv_trans.price
link.in_date = recv_iv_trans.itl_receiving_line.receiving.date
link.receiving_line_id = recv_iv_trans.itl_receiving_line.id
link.out_price = ship_line.price
link.out_date = ship_line.shipping.date
link.out_quantity = ship_line.quantity
link.shipping_line = ship_line
link.organization = ship_line.shipping.organization
to_update_purchase_inventory_line.append(recv_iv_trans)
inventory_in_out_links.append(link)
if remain_qty == 0:
break
for l in to_update_purchase_inventory_line:
Info.get_db().session.add(l)
for l in inventory_in_out_links:
Info.get_db().session.add(l)
class ShippingLine(db.Model):
    __tablename__ = 'shipping_line'
id = Column(Integer, primary_key=True)
quantity = Column(Numeric(precision=8, scale=2, decimal_return_scale=2), nullable=False)
price = Column(Numeric(precision=8, scale=2, decimal_return_scale=2), nullable=False)
product_id = Column(Integer, ForeignKey('product.id'), nullable=False)
product = relationship('Product', backref=backref('shipping_lines'))
shipping_id = Column(Integer, ForeignKey('shipping.id'), nullable=False)
shipping = relationship('Shipping', backref=backref('lines', uselist=True, cascade='all, delete-orphan'))
sales_order_line_id = Column(Integer, ForeignKey('sales_order_line.id'), nullable=False)
sales_order_line = relationship('SalesOrderLine', backref=backref('sol_shipping_line', uselist=False, ))
inventory_transaction_line_id = Column(Integer, ForeignKey('inventory_transaction_line.id'), nullable=True)
inventory_transaction_line = relationship('InventoryTransactionLine', backref=backref('itl_shipping_line',
uselist=False, ))
def __repr__(self):
return "{0:s}{1:f}个(价格{2:f}元)".format(self.product.name, self.quantity, self.price)
@hybrid_property
def total_amount(self):
if self.quantity is None:
q = 0
else:
q = self.quantity
return format_decimal(Decimal(self.price * q))
    @total_amount.setter
    def total_amount(self, value):
        pass
    @total_amount.expression
    def total_amount(self):
        return select([self.price * self.quantity]).label('line_total_amount')
|
py | b40dca3594d9749c51e125faa208115b31d295d4 | #!/usr/bin/python
from __future__ import print_function
import argparse
import torch
import pickle
import numpy as np
import os
import math
import random
import sys
import matplotlib.pyplot as plt
import data
import scipy.io
from torch import nn, optim
from torch.nn import functional as F
from etm import ETM
from utils import nearest_neighbors, get_topic_coherence, get_topic_diversity
parser = argparse.ArgumentParser(description='The Embedded Topic Model')
### data and file related arguments
parser.add_argument('--dataset', type=str, default='20ng', help='name of corpus')
parser.add_argument('--data_path', type=str, default='data/20ng', help='directory containing data')
parser.add_argument('--emb_path', type=str, default='data/20ng_embeddings.txt', help='directory containing word embeddings')
parser.add_argument('--save_path', type=str, default='./results', help='path to save results')
parser.add_argument('--batch_size', type=int, default=1000, help='input batch size for training')
### model-related arguments
parser.add_argument('--num_topics', type=int, default=50, help='number of topics')
parser.add_argument('--rho_size', type=int, default=300, help='dimension of rho')
parser.add_argument('--emb_size', type=int, default=300, help='dimension of embeddings')
parser.add_argument('--t_hidden_size', type=int, default=800, help='dimension of hidden space of q(theta)')
parser.add_argument('--theta_act', type=str, default='relu', help='tanh, softplus, relu, rrelu, leakyrelu, elu, selu, glu')
parser.add_argument('--train_embeddings', type=int, default=0, help='whether to fix rho or train it')
### optimization-related arguments
parser.add_argument('--lr', type=float, default=0.005, help='learning rate')
parser.add_argument('--lr_factor', type=float, default=4.0, help='divide learning rate by this...')
parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train...150 for 20ng 100 for others')
parser.add_argument('--mode', type=str, default='train', help='train or eval model')
parser.add_argument('--optimizer', type=str, default='adam', help='choice of optimizer')
parser.add_argument('--seed', type=int, default=2019, help='random seed (default: 1)')
parser.add_argument('--enc_drop', type=float, default=0.0, help='dropout rate on encoder')
parser.add_argument('--clip', type=float, default=0.0, help='gradient clipping')
parser.add_argument('--nonmono', type=int, default=10, help='number of bad hits allowed')
parser.add_argument('--wdecay', type=float, default=1.2e-6, help='some l2 regularization')
parser.add_argument('--anneal_lr', type=int, default=0, help='whether to anneal the learning rate or not')
parser.add_argument('--bow_norm', type=int, default=1, help='normalize the bows or not')
### evaluation, visualization, and logging-related arguments
parser.add_argument('--num_words', type=int, default=10, help='number of words for topic viz')
parser.add_argument('--log_interval', type=int, default=2, help='when to log training')
parser.add_argument('--visualize_every', type=int, default=10, help='when to visualize results')
parser.add_argument('--eval_batch_size', type=int, default=1000, help='input batch size for evaluation')
parser.add_argument('--load_from', type=str, default='', help='the name of the ckpt to eval from')
parser.add_argument('--tc', type=int, default=0, help='whether to compute topic coherence or not')
parser.add_argument('--td', type=int, default=0, help='whether to compute topic diversity or not')
args = parser.parse_args()
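# Illustrative command lines (the script file name is an assumption; the flags
# are the argparse options defined above):
#   python main.py --mode train --dataset 20ng --data_path data/20ng --num_topics 50 --epochs 150
#   python main.py --mode eval --load_from ./results/etm_20ng_... --tc 1 --td 1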
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('\n')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
## get data
# 1. vocabulary
vocab, train, valid, test = data.get_data(os.path.join(args.data_path))
vocab_size = len(vocab)
args.vocab_size = vocab_size
# 1. training data
train_tokens = train['tokens']
train_counts = train['counts']
args.num_docs_train = len(train_tokens)
# 2. dev set
valid_tokens = valid['tokens']
valid_counts = valid['counts']
args.num_docs_valid = len(valid_tokens)
# 3. test data
test_tokens = test['tokens']
test_counts = test['counts']
args.num_docs_test = len(test_tokens)
test_1_tokens = test['tokens_1']
test_1_counts = test['counts_1']
args.num_docs_test_1 = len(test_1_tokens)
test_2_tokens = test['tokens_2']
test_2_counts = test['counts_2']
args.num_docs_test_2 = len(test_2_tokens)
emb_type = 'none' #bert
embeddings = None
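# Embedding source selected by emb_type (a summary of the branches below):
#   'w2v'  -> load vectors from a pre-trained gensim Word2Vec model on disk
#   'bert' -> encode every vocabulary word with a SentenceTransformer model,
#             then project the vectors down to 300 dimensions with PCA
#   otherwise -> keep the zero matrix built below; rho is then trained or
#             fixed depending on --train_embeddings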
if(emb_type=='w2v'):
from gensim.models import Word2Vec
model = Word2Vec.load("/content/word2vec/w2v_10eps_model.model")
vectors=model.wv
print('loaded')
elif(emb_type=='bert'):
from sentence_transformers import SentenceTransformer
from sentence_transformers import models, losses
import scipy.spatial
import pickle as pkl
word_embedding_model = models.BERT("/content/models")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
import pandas as pd
from tqdm import tqdm
#if not args.train_embeddings:
emb_path = args.emb_path
embeddings = np.zeros((vocab_size, args.emb_size))
words_found = 0
errors=0
if(emb_type=='bert'):
data_embs=[]
batch=[]
for i, word in enumerate(vocab):
batch.append(word)
if(i%500==0):
print(i)
embs=model.encode(batch,show_progress_bar=False)
for e in embs:
data_embs.append(e)
batch=[]
embs=model.encode(batch,show_progress_bar=False)
for e in embs:
data_embs.append(e)
if(emb_type=='bert'):
import numpy as np
from sklearn.decomposition import PCA
pca = PCA(n_components=300)
data_embs=pca.fit_transform(data_embs)
print(pca.explained_variance_ratio_.cumsum())
if(emb_type=='w2v' or emb_type=='bert'):
for i, word in enumerate(vocab):
try:
if(emb_type=='w2v'):
embeddings[i] = vectors[word]
elif(emb_type=='bert'):
embeddings[i] = data_embs[i]
words_found += 1
except KeyError:
errors+=1
embeddings[i] = np.random.normal(scale=0.6, size=(args.emb_size, ))
embeddings = torch.from_numpy(embeddings).to(device)
args.embeddings_dim = embeddings.size()
vectors=[]
model=[]
print('errors: ',errors)
print('words_found: ',words_found)
print('=*'*100)
print('Training an Embedded Topic Model on {} with the following settings: {}'.format(args.dataset.upper(), args))
print('=*'*100)
## define checkpoint
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
if args.mode == 'eval':
ckpt = args.load_from
else:
ckpt = os.path.join(args.save_path,
'etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'.format(
args.dataset, args.num_topics, args.t_hidden_size, args.optimizer, args.clip, args.theta_act,
args.lr, args.batch_size, args.rho_size, args.train_embeddings))
## define model and optimizer
model = ETM(args.num_topics, vocab_size, args.t_hidden_size, args.rho_size, args.emb_size,
args.theta_act, embeddings, args.train_embeddings, args.enc_drop).to(device)
print('model: {}'.format(model))
if args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adadelta':
optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'asgd':
optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
print('Defaulting to vanilla SGD')
optimizer = optim.SGD(model.parameters(), lr=args.lr)
def train(epoch):
model.train()
acc_loss = 0
acc_kl_theta_loss = 0
cnt = 0
indices = torch.randperm(args.num_docs_train)
indices = torch.split(indices, args.batch_size)
for idx, ind in enumerate(indices):
optimizer.zero_grad()
model.zero_grad()
data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)
sums = data_batch.sum(1).unsqueeze(1)
if args.bow_norm:
normalized_data_batch = data_batch / sums
else:
normalized_data_batch = data_batch
recon_loss, kld_theta = model(data_batch, normalized_data_batch)
total_loss = recon_loss + kld_theta
total_loss.backward()
if args.clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
acc_loss += torch.sum(recon_loss).item()
acc_kl_theta_loss += torch.sum(kld_theta).item()
cnt += 1
if idx % args.log_interval == 0 and idx > 0:
cur_loss = round(acc_loss / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_real_loss = round(cur_loss + cur_kl_theta, 2)
print('Epoch: {} .. batch: {}/{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
epoch, idx, len(indices), optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
cur_loss = round(acc_loss / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_real_loss = round(cur_loss + cur_kl_theta, 2)
print('*'*100)
print('Epoch----->{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
epoch, optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
print('*'*100)
def visualize(m, show_emb=True):
if not os.path.exists('./results'):
os.makedirs('./results')
m.eval()
#queries = ['andrew', 'computer', 'sports', 'religion', 'man', 'love',
# 'intelligence', 'money', 'politics', 'health', 'people', 'family']
queries = ['plantar','evolucao','gene','diversidade']
## visualize topics using monte carlo
with torch.no_grad():
print('#'*100)
print('Visualize topics...')
topics_words = []
gammas = m.get_beta()
for k in range(args.num_topics):
gamma = gammas[k]
top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])
topic_words = [vocab[a] for a in top_words]
topics_words.append(' '.join(topic_words))
print('Topic {}: {}'.format(k, topic_words))
if show_emb:
## visualize word embeddings by using V to get nearest neighbors
print('#'*100)
print('Visualize word embeddings by using output embedding matrix')
try:
embeddings = m.rho.weight # Vocab_size x E
except:
embeddings = m.rho # Vocab_size x E
neighbors = []
for word in queries:
print('word: {} .. neighbors: {}'.format(
word, nearest_neighbors(word, embeddings, vocab)))
print('#'*100)
def evaluate(m, source, tc=False, td=False):
"""Compute perplexity on document completion.
"""
m.eval()
with torch.no_grad():
if source == 'val':
indices = torch.split(torch.tensor(range(args.num_docs_valid)), args.eval_batch_size)
tokens = valid_tokens
counts = valid_counts
else:
indices = torch.split(torch.tensor(range(args.num_docs_test)), args.eval_batch_size)
tokens = test_tokens
counts = test_counts
## get \beta here
beta = m.get_beta()
### do dc and tc here
acc_loss = 0
cnt = 0
indices_1 = torch.split(torch.tensor(range(args.num_docs_test_1)), args.eval_batch_size)
for idx, ind in enumerate(indices_1):
## get theta from first half of docs
data_batch_1 = data.get_batch(test_1_tokens, test_1_counts, ind, args.vocab_size, device)
sums_1 = data_batch_1.sum(1).unsqueeze(1)
if args.bow_norm:
normalized_data_batch_1 = data_batch_1 / sums_1
else:
normalized_data_batch_1 = data_batch_1
theta, _ = m.get_theta(normalized_data_batch_1)
## get prediction loss using second half
data_batch_2 = data.get_batch(test_2_tokens, test_2_counts, ind, args.vocab_size, device)
sums_2 = data_batch_2.sum(1).unsqueeze(1)
res = torch.mm(theta, beta)
preds = torch.log(res)
recon_loss = -(preds * data_batch_2).sum(1)
loss = recon_loss / sums_2.squeeze()
loss = loss.mean().item()
acc_loss += loss
cnt += 1
cur_loss = acc_loss / cnt
ppl_dc = round(math.exp(cur_loss), 1)
print('*'*100)
print('{} Doc Completion PPL: {}'.format(source.upper(), ppl_dc))
print('*'*100)
if tc or td:
beta = beta.data.cpu().numpy()
if tc:
print('Computing topic coherence...')
get_topic_coherence(beta, train_tokens, vocab)
if td:
print('Computing topic diversity...')
get_topic_diversity(beta, 25)
return ppl_dc
if args.mode == 'train':
## train model on data
best_epoch = 0
best_val_ppl = 1e9
all_val_ppls = []
print('\n')
print('Visualizing model quality before training...')
visualize(model)
print('\n')
for epoch in range(1, args.epochs):
train(epoch)
val_ppl = evaluate(model, 'val')
if val_ppl < best_val_ppl:
with open(ckpt, 'wb') as f:
torch.save(model, f)
best_epoch = epoch
best_val_ppl = val_ppl
else:
## check whether to anneal lr
lr = optimizer.param_groups[0]['lr']
if args.anneal_lr and (len(all_val_ppls) > args.nonmono and val_ppl > min(all_val_ppls[:-args.nonmono]) and lr > 1e-5):
optimizer.param_groups[0]['lr'] /= args.lr_factor
if epoch % args.visualize_every == 0:
visualize(model)
all_val_ppls.append(val_ppl)
with open(ckpt, 'rb') as f:
model = torch.load(f)
model = model.to(device)
val_ppl = evaluate(model, 'val')
else:
with open(ckpt, 'rb') as f:
model = torch.load(f)
model = model.to(device)
model.eval()
with torch.no_grad():
## get document completion perplexities
test_ppl = evaluate(model, 'test', tc=args.tc, td=args.td)
## get most used topics
indices = torch.tensor(range(args.num_docs_train))
indices = torch.split(indices, args.batch_size)
thetaAvg = torch.zeros(1, args.num_topics).to(device)
thetaWeightedAvg = torch.zeros(1, args.num_topics).to(device)
cnt = 0
for idx, ind in enumerate(indices):
data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)
sums = data_batch.sum(1).unsqueeze(1)
cnt += sums.sum(0).squeeze().cpu().numpy()
if args.bow_norm:
normalized_data_batch = data_batch / sums
else:
normalized_data_batch = data_batch
theta, _ = model.get_theta(normalized_data_batch)
thetaAvg += theta.sum(0).unsqueeze(0) / args.num_docs_train
weighed_theta = sums * theta
thetaWeightedAvg += weighed_theta.sum(0).unsqueeze(0)
if idx % 100 == 0 and idx > 0:
print('batch: {}/{}'.format(idx, len(indices)))
thetaWeightedAvg = thetaWeightedAvg.squeeze().cpu().numpy() / cnt
print('\nThe 10 most used topics are {}'.format(thetaWeightedAvg.argsort()[::-1][:10]))
## show topics
beta = model.get_beta()
topic_indices = list(np.random.choice(args.num_topics, 10)) # 10 random topics
print('\n')
for k in range(args.num_topics):#topic_indices:
gamma = beta[k]
top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])
topic_words = [vocab[a] for a in top_words]
print('Topic {}: {}'.format(k, topic_words))
if args.train_embeddings:
## show etm embeddings
try:
rho_etm = model.rho.weight.cpu()
except:
rho_etm = model.rho.cpu()
queries = ['andrew', 'woman', 'computer', 'sports', 'religion', 'man', 'love',
'intelligence', 'money', 'politics', 'health', 'people', 'family']
print('\n')
print('ETM embeddings...')
for word in queries:
print('word: {} .. etm neighbors: {}'.format(word, nearest_neighbors(word, rho_etm, vocab)))
print('\n')
|
py | b40dca7bc4bca9f4797cc9d60ee9a070e20506e7 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.devtools.artifactregistry.v1beta2",
manifest={
"YumArtifact",
"ImportYumArtifactsGcsSource",
"ImportYumArtifactsRequest",
"ImportYumArtifactsErrorInfo",
"ImportYumArtifactsResponse",
"ImportYumArtifactsMetadata",
},
)
class YumArtifact(proto.Message):
r"""A detailed representation of a Yum artifact.
Attributes:
name (str):
Output only. The Artifact Registry resource
name of the artifact.
package_name (str):
Output only. The yum package name of the
artifact.
package_type (google.cloud.artifactregistry_v1beta2.types.YumArtifact.PackageType):
Output only. An artifact is a binary or
source package.
architecture (str):
Output only. Operating system architecture of
the artifact.
"""
class PackageType(proto.Enum):
r"""Package type is either binary or source."""
PACKAGE_TYPE_UNSPECIFIED = 0
BINARY = 1
SOURCE = 2
name = proto.Field(proto.STRING, number=1,)
package_name = proto.Field(proto.STRING, number=2,)
package_type = proto.Field(proto.ENUM, number=3, enum=PackageType,)
architecture = proto.Field(proto.STRING, number=4,)
class ImportYumArtifactsGcsSource(proto.Message):
r"""Google Cloud Storage location where the artifacts currently
reside.
Attributes:
uris (Sequence[str]):
Cloud Storage paths URI (e.g., gs://my_bucket//my_object).
use_wildcards (bool):
Supports URI wildcards for matching multiple
objects from a single URI.
"""
uris = proto.RepeatedField(proto.STRING, number=1,)
use_wildcards = proto.Field(proto.BOOL, number=2,)
class ImportYumArtifactsRequest(proto.Message):
r"""The request to import new yum artifacts.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.artifactregistry_v1beta2.types.ImportYumArtifactsGcsSource):
Google Cloud Storage location where input
content is located.
This field is a member of `oneof`_ ``source``.
parent (str):
The name of the parent resource where the
artifacts will be imported.
"""
gcs_source = proto.Field(
proto.MESSAGE, number=2, oneof="source", message="ImportYumArtifactsGcsSource",
)
parent = proto.Field(proto.STRING, number=1,)
class ImportYumArtifactsErrorInfo(proto.Message):
r"""Error information explaining why a package was not imported.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.artifactregistry_v1beta2.types.ImportYumArtifactsGcsSource):
Google Cloud Storage location requested.
This field is a member of `oneof`_ ``source``.
error (google.rpc.status_pb2.Status):
The detailed error status.
"""
gcs_source = proto.Field(
proto.MESSAGE, number=1, oneof="source", message="ImportYumArtifactsGcsSource",
)
error = proto.Field(proto.MESSAGE, number=2, message=status_pb2.Status,)
class ImportYumArtifactsResponse(proto.Message):
r"""The response message from importing YUM artifacts.
Attributes:
yum_artifacts (Sequence[google.cloud.artifactregistry_v1beta2.types.YumArtifact]):
The yum artifacts imported.
errors (Sequence[google.cloud.artifactregistry_v1beta2.types.ImportYumArtifactsErrorInfo]):
Detailed error info for artifacts that were
not imported.
"""
yum_artifacts = proto.RepeatedField(proto.MESSAGE, number=1, message="YumArtifact",)
errors = proto.RepeatedField(
proto.MESSAGE, number=2, message="ImportYumArtifactsErrorInfo",
)
class ImportYumArtifactsMetadata(proto.Message):
r"""The operation metadata for importing artifacts.
"""
__all__ = tuple(sorted(__protobuf__.manifest))
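# Illustrative construction of an import request (a sketch built from the
# message classes above; the parent path and bucket URI are made-up values):
#   request = ImportYumArtifactsRequest(
#       parent="projects/my-project/locations/us/repositories/my-repo",
#       gcs_source=ImportYumArtifactsGcsSource(
#           uris=["gs://my-bucket/packages/*.rpm"], use_wildcards=True,
#       ),
#   )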
|
py | b40dcbf28ad93c35b0db9422ebf38d664ff2e60d | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter3d.line.colorbar"
_path_str = "scatter3d.line.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`new_plotly.graph_objs.scatter3d.line
.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.scatter3d.line.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.scatter3d.line.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
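# Illustrative usage (a sketch; the import path mirrors the constructor
# docstring above and the font values are made up):
#   tickfont = new_plotly.graph_objs.scatter3d.line.colorbar.Tickfont(
#       family="Arial", size=12, color="#444")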
|
py | b40dcdfc1a994ae2ecb28b2041f8540b7059d402 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Buildbot metrics module
Keeps track of counts and timings of various internal buildbot
activities.
Basic architecture:
MetricEvent.log(...)
||
\/
MetricLogObserver
||
\/
MetricHandler
||
\/
MetricWatcher
"""
from collections import deque
from twisted.python import log
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from twisted.application import service
from buildbot import util
from buildbot.util.bbcollections import defaultdict
import gc, os, sys
# Make use of the resource module if we can
try:
import resource
assert resource
except ImportError:
resource = None
class MetricEvent(object):
@classmethod
def log(cls, *args, **kwargs):
log.msg(metric=cls(*args, **kwargs))
class MetricCountEvent(MetricEvent):
def __init__(self, counter, count=1, absolute=False):
self.counter = counter
self.count = count
self.absolute = absolute
class MetricTimeEvent(MetricEvent):
def __init__(self, timer, elapsed):
self.timer = timer
self.elapsed = elapsed
ALARM_OK, ALARM_WARN, ALARM_CRIT = range(3)
ALARM_TEXT = ["OK", "WARN", "CRIT"]
class MetricAlarmEvent(MetricEvent):
def __init__(self, alarm, msg=None, level=ALARM_OK):
self.alarm = alarm
self.level = level
self.msg = msg
def countMethod(counter):
def decorator(func):
def wrapper(*args, **kwargs):
MetricCountEvent.log(counter=counter)
return func(*args, **kwargs)
return wrapper
return decorator
class Timer(object):
# For testing
_reactor = None
def __init__(self, name):
self.name = name
self.started = None
def startTimer(self, func):
def wrapper(*args, **kwargs):
self.start()
return func(*args, **kwargs)
return wrapper
def stopTimer(self, func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
finally:
self.stop()
return wrapper
def start(self):
self.started = util.now(self._reactor)
def stop(self):
if self.started is not None:
elapsed = util.now(self._reactor) - self.started
MetricTimeEvent.log(timer=self.name, elapsed=elapsed)
self.started = None
def timeMethod(name, _reactor=None):
def decorator(func):
t = Timer(name)
t._reactor=_reactor
def wrapper(*args, **kwargs):
t.start()
try:
return func(*args, **kwargs)
finally:
t.stop()
return wrapper
return decorator
class FiniteList(deque):
def __init__(self, maxlen=10):
self._maxlen = maxlen
deque.__init__(self)
def append(self, o):
deque.append(self, o)
if len(self) > self._maxlen:
self.popleft()
class AveragingFiniteList(FiniteList):
def __init__(self, maxlen=10):
FiniteList.__init__(self, maxlen)
self.average = 0
def append(self, o):
FiniteList.append(self, o)
self._calc()
def _calc(self):
if len(self) == 0:
self.average = 0
else:
self.average = float(sum(self)) / len(self)
return self.average
class MetricHandler(object):
def __init__(self, metrics):
self.metrics = metrics
self.watchers = []
self.reset()
def addWatcher(self, watcher):
self.watchers.append(watcher)
def removeWatcher(self, watcher):
self.watchers.remove(watcher)
# For subclasses to define
def reset(self):
raise NotImplementedError
def handle(self, eventDict, metric):
raise NotImplementedError
def get(self, metric):
raise NotImplementedError
def keys(self):
raise NotImplementedError
def report(self):
raise NotImplementedError
def asDict(self):
raise NotImplementedError
class MetricCountHandler(MetricHandler):
_counters = None
def reset(self):
self._counters = defaultdict(int)
def handle(self, eventDict, metric):
if metric.absolute:
self._counters[metric.counter] = metric.count
else:
self._counters[metric.counter] += metric.count
def keys(self):
return self._counters.keys()
def get(self, counter):
return self._counters[counter]
def report(self):
retval = []
for counter in sorted(self.keys()):
retval.append("Counter %s: %i" % (counter, self.get(counter)))
return "\n".join(retval)
def asDict(self):
retval = {}
for counter in sorted(self.keys()):
retval[counter] = self.get(counter)
return dict(counters=retval)
class MetricTimeHandler(MetricHandler):
_timers = None
def reset(self):
self._timers = defaultdict(AveragingFiniteList)
def handle(self, eventDict, metric):
self._timers[metric.timer].append(metric.elapsed)
def keys(self):
return self._timers.keys()
def get(self, timer):
return self._timers[timer].average
def report(self):
retval = []
for timer in sorted(self.keys()):
retval.append("Timer %s: %.3g" % (timer, self.get(timer)))
return "\n".join(retval)
def asDict(self):
retval = {}
for timer in sorted(self.keys()):
retval[timer] = self.get(timer)
return dict(timers=retval)
class MetricAlarmHandler(MetricHandler):
_alarms = None
def reset(self):
        self._alarms = defaultdict(lambda: (ALARM_OK, None))
def handle(self, eventDict, metric):
self._alarms[metric.alarm] = (metric.level, metric.msg)
def report(self):
retval = []
for alarm, (level, msg) in sorted(self._alarms.items()):
if msg:
retval.append("%s %s: %s" % (ALARM_TEXT[level], alarm, msg))
else:
retval.append("%s %s" % (ALARM_TEXT[level], alarm))
return "\n".join(retval)
def asDict(self):
retval = {}
for alarm, (level, msg) in sorted(self._alarms.items()):
retval[alarm] = (ALARM_TEXT[level], msg)
return dict(alarms=retval)
class PollerWatcher(object):
def __init__(self, metrics):
self.metrics = metrics
def run(self):
# Check if 'BuildMaster.pollDatabaseChanges()' and
# 'BuildMaster.pollDatabaseBuildRequests()' are running fast enough
h = self.metrics.getHandler(MetricTimeEvent)
if not h:
log.msg("Couldn't get MetricTimeEvent handler")
            MetricAlarmEvent.log('PollerWatcher',
                msg="Couldn't get MetricTimeEvent handler",
                level=ALARM_WARN)
return
for method in ('BuildMaster.pollDatabaseChanges()',
'BuildMaster.pollDatabaseBuildRequests()'):
t = h.get(method)
db_poll_interval = self.metrics.parent.db_poll_interval
if t < 0.8 * db_poll_interval:
level = ALARM_OK
elif t < db_poll_interval:
level = ALARM_WARN
else:
level = ALARM_CRIT
MetricAlarmEvent.log(method, level=level)
class AttachedSlavesWatcher(object):
def __init__(self, metrics):
self.metrics = metrics
def run(self):
# Check if 'BotMaster.attached_slaves' equals
# 'AbstractBuildSlave.attached_slaves'
h = self.metrics.getHandler(MetricCountEvent)
if not h:
log.msg("Couldn't get MetricCountEvent handler")
            MetricAlarmEvent.log('AttachedSlavesWatcher',
                msg="Couldn't get MetricCountEvent handler",
                level=ALARM_WARN)
return
botmaster_count = h.get('BotMaster.attached_slaves')
buildslave_count = h.get('AbstractBuildSlave.attached_slaves')
# We let these be off by one since they're counted at slightly
# different times
if abs(botmaster_count - buildslave_count) > 1:
level = ALARM_WARN
else:
level = ALARM_OK
MetricAlarmEvent.log('attached_slaves',
msg='%s %s' % (botmaster_count, buildslave_count),
level=level)
def _get_rss():
if sys.platform == 'linux2':
try:
return int(open("/proc/%i/statm" % os.getpid()).read().split()[1])
except:
return 0
return 0
def periodicCheck(_reactor=reactor):
# Measure how much garbage we have
garbage_count = len(gc.garbage)
MetricCountEvent.log('gc.garbage', garbage_count, absolute=True)
if garbage_count == 0:
level = ALARM_OK
else:
level = ALARM_WARN
MetricAlarmEvent.log('gc.garbage', level=level)
if resource:
r = resource.getrusage(resource.RUSAGE_SELF)
attrs = ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_ixrss', 'ru_idrss',
'ru_isrss', 'ru_minflt', 'ru_majflt', 'ru_nswap',
'ru_inblock', 'ru_oublock', 'ru_msgsnd', 'ru_msgrcv',
'ru_nsignals', 'ru_nvcsw', 'ru_nivcsw']
for i,a in enumerate(attrs):
# Linux versions prior to 2.6.32 didn't report this value, but we
# can calculate it from /proc/<pid>/statm
v = r[i]
if a == 'ru_maxrss' and v == 0:
v = _get_rss() * resource.getpagesize() / 1024
MetricCountEvent.log('resource.%s' % a, v, absolute=True)
MetricCountEvent.log('resource.pagesize', resource.getpagesize(), absolute=True)
# Measure the reactor delay
then = util.now(_reactor)
dt = 0.1
def cb():
now = util.now(_reactor)
delay = (now - then) - dt
MetricTimeEvent.log("reactorDelay", delay)
_reactor.callLater(dt, cb)
class MetricLogObserver(service.MultiService):
_reactor = reactor
def __init__(self, config):
service.MultiService.__init__(self)
self.config = config
self.periodic_task = None
self.log_task = None
# Mapping of metric type to handlers for that type
self.handlers = {}
# Register our default handlers
self.registerHandler(MetricCountEvent, MetricCountHandler(self))
self.registerHandler(MetricTimeEvent, MetricTimeHandler(self))
self.registerHandler(MetricAlarmEvent, MetricAlarmHandler(self))
# Make sure our changes poller is behaving
self.getHandler(MetricTimeEvent).addWatcher(PollerWatcher(self))
self.getHandler(MetricCountEvent).addWatcher(
AttachedSlavesWatcher(self))
def reloadConfig(self, config):
self.config = config
log_interval = self.config.get('log_interval', 60)
if self.log_task:
self.log_task.stop()
if log_interval:
# Start up periodic logging
self.log_task = LoopingCall(self.report)
self.log_task.clock = self._reactor
self.log_task.start(log_interval)
else:
self.log_task = None
periodic_interval = self.config.get('periodic_interval', 10)
if self.periodic_task:
self.periodic_task.stop()
if periodic_interval:
self.periodic_task = LoopingCall(periodicCheck, self._reactor)
self.periodic_task.clock = self._reactor
self.periodic_task.start(periodic_interval)
else:
self.periodic_task = None
def startService(self):
log.msg("Starting %s" % self)
service.MultiService.startService(self)
log.addObserver(self.emit)
self.reloadConfig(self.config)
def stopService(self):
log.msg("Stopping %s" % self)
service.MultiService.stopService(self)
if self.periodic_task:
self.periodic_task.stop()
self.periodic_task = None
if self.log_task:
self.log_task.stop()
self.log_task = None
log.removeObserver(self.emit)
def registerHandler(self, interface, handler):
old = self.getHandler(interface)
self.handlers[interface] = handler
return old
def getHandler(self, interface):
return self.handlers.get(interface)
def emit(self, eventDict):
# Ignore non-statistic events
metric = eventDict.get('metric')
if not metric or not isinstance(metric, MetricEvent):
return
if metric.__class__ not in self.handlers:
return
h = self.handlers[metric.__class__]
h.handle(eventDict, metric)
for w in h.watchers:
w.run()
def asDict(self):
retval = {}
for interface, handler in self.handlers.iteritems():
retval.update(handler.asDict())
return retval
def report(self):
try:
for interface, handler in self.handlers.iteritems():
report = handler.report()
if not report:
continue
for line in report.split("\n"):
log.msg(line)
except:
log.err()
|
py | b40dce2dcc91d8abf04e9f96e2ffa8f3f0b77188 | import pytest
import copy
class Compare(object):
def __init__(self):
self._diff = {}
def diff(self, d1, d2):
self._recursive_compare(d1, d2)
return self._diff
def _recursive_compare(self, d1, d2, level='root'):
if isinstance(d1, dict) and isinstance(d2, dict):
if d1.keys() != d2.keys():
s1 = set(d1.keys())
s2 = set(d2.keys())
print('{:<20} + {} - {}'.format(level, s1-s2, s2-s1))
common_keys = s1 & s2
else:
common_keys = set(d1.keys())
for k in common_keys:
self._recursive_compare(d1[k], d2[k], level='{}.{}'.format(level, k))
elif isinstance(d1, list) and isinstance(d2, list):
if len(d1) != len(d2):
print('{:<20} len1={}; len2={}'.format(level, len(d1), len(d2)))
common_len = min(len(d1), len(d2))
for i in range(common_len):
self._recursive_compare(d1[i], d2[i], level='{}[{}]'.format(level, i))
else:
if d1 != d2:
                diff = '{:<20} {} != {}'.format(level, d1, d2)
                print(diff)
                self._nested_set(level.split('.')[1:], d2)
def _nested_set(self, keys, value):
d = self._diff
for key in keys[:-1]:
if key in d:
d = d[key]
else:
d = d.setdefault(key, {})
d[keys[-1]] = value
@pytest.mark.ConfigTest
def test_recursive_compare():
first = {
'a': 1,
'b': 'one',
'c': {
'd': 2,
'e': 'two'
}
}
second = copy.deepcopy(first)
second['b'] = 'three'
second['c']['d'] = 3
diff = Compare().diff(first, second)
assert(len(diff.keys()) == 2)
assert(len(diff['c'].keys()) == 1)
assert(diff['b'] == 'three')
assert(diff['c']['d'] == 3)
@pytest.mark.skip(reason='IndexError: list index out of range')
def test_obj_compare():
a = lambda: None
a.name = 'asdf'
b = lambda: None
b.name = 'asdf'
a.c = lambda: None
b.c = lambda: None
a.c.name ='asdf'
b.c.name = 'ddd'
diff = Compare().diff(a, b)
    assert(len(diff.keys()))
if __name__ == '__main__':
pytest.main(['-s', __file__])
|
py | b40dcf7d124749deb07597cb4f2869a0094703e0 | import setuptools
with open("README.rst", "r", encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name="arclet-alconna",
version="1.0.0",
author="ArcletProject",
author_email="[email protected]",
description="A Fast Command Analyser based on Dict",
license='MIT',
long_description=long_description,
    long_description_content_type="text/x-rst",
url="https://github.com/ArcletProject/Alconna",
package_dir={"": "src"},
packages=setuptools.find_namespace_packages(where="src"),
install_requires=['typing_extensions'],
extras_require={
'graia': [
'arclet-alconna-graia',
],
'cli': [
'arclet-alconna-cli'
],
},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
],
include_package_data=True,
keywords=['command', 'argparse', 'fast', 'alconna', 'cli', 'parsing', 'optparse', 'command-line', 'parser'],
python_requires='>=3.8',
project_urls={
'Documentation': 'https://arcletproject.github.io/docs/alconna/tutorial',
'Bug Reports': 'https://github.com/ArcletProject/Alconna/issues',
'Source': 'https://github.com/ArcletProject/Alconna',
},
)
|
py | b40dcfcfdcc224134c751a62befffe7a8cd119e4 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register("hello-viewset", views.HelloViewSet, basename="hello-viewset")
router.register("profile", views.UserProfileViewSet)
router.register("feed", views.ProfileFeedItemViewSet)
urlpatterns = [
path("hello-view/", views.HelloApiView.as_view()),
path("login/", views.UserLoginApiView.as_view()),
path("", include(router.urls)),
]
|
py | b40dd052e87945fdd546e83e934f2487387d74ed | import cs_grading as CS
import os
text_editor = 'subl'
#Options for p5
run_p5_test = 1 # Change to 0 to turn off these tests .
p5_use_valgrind = 1 # Change to 0 if you don't want valgrind to be run.
p5_source_files = '../barry.cpp' # The name and location of the student's solution file relative to this script.
p5_open_results = 1 # Change to 0 if you don't want the results files opened automatically.
p5_remove_files = 1 # Change to 0 if you don't want intermediary files to be removed.
#Options for p6
run_p6_test = 1 # Change to 0 to turn off these tests .
p6_use_valgrind = 1 # Change to 0 if you don't want valgrind to be run.
p6_source_files = '../hw1q6.cpp' # The name and location of the student's solution file relative to this script.
p6_open_results = 1 # Change to 0 if you don't want the results files opened automatically.
p6_remove_files = 1 # Change to 0 if you don't want intermediary files to be removed.
#Options for p7
run_p7_test = 1 # Change to 0 to turn off these tests .
p7_use_valgrind = 1 # Change to 0 if you don't want valgrind to be run.
p7_source_files = '../game_of_pointers.cpp' # The name and location of the student's solution file relative to this script.
p7_open_results = 1 # Change to 0 if you don't want the results files opened automatically.
p7_remove_files = 1 # Change to 0 if you don't want intermediary files to be removed.
### p5 run tests
if run_p5_test:
p5_result_file = 'p5_result.txt'
p5_valgrind_file = 'p5_valgrind.txt'
p5_target = 'barry'
if CS.check_file_existence(p5_result_file):
CS.remove_file(p5_result_file)
if CS.check_file_existence(p5_valgrind_file):
CS.remove_file(p5_valgrind_file)
CS.compile_student_code(0,
source_files=p5_source_files,
target=p5_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q5_student_output')
f = open('q5_input/input.txt', 'r')
correct_test = 0
for i in xrange(1,11):
string = f.readline().strip()
output_file = 'q5_student_output/output' + str(i) + '.out'
expected_output_file = 'q5_output/output' + str(i) + '.txt'
CS.run_executable('./',
p5_target,
string + ' > ' + output_file,
use_valgrind=p5_use_valgrind,
valgrind_log_filename=p5_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p5_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p5_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p5_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p5_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == 10:
CS.write_message(p5_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p5_result_file, 'Failed ' + str(10 - correct_test) + ' tests!')
if p5_open_results:
CS.open_file(p5_result_file, text_editor)
if p5_use_valgrind:
CS.open_file(p5_valgrind_file, text_editor)
# Clean up
if p5_remove_files:
CS.remove_file(p5_target)
os.system('rm -r q5_student_output')
### p6 run tests
if run_p6_test:
p6_result_file = 'p6_result.txt'
p6_valgrind_file = 'p6_valgrind.txt'
p6_target = 'hw1q6'
if CS.check_file_existence(p6_result_file):
CS.remove_file(p6_result_file)
if CS.check_file_existence(p6_valgrind_file):
CS.remove_file(p6_valgrind_file)
CS.compile_student_code(0,
source_files=p6_source_files,
target=p6_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q6_student_output')
correct_test = 0
for i in xrange(1,11):
string = 'q6_input/input' + str(i) + '.txt'
output_file = 'q6_student_output/output' + str(i) + '.out'
expected_output_file = 'q6_output/output' + str(i) + '.txt'
CS.run_executable('./',
p6_target,
string + ' > ' + output_file,
use_valgrind=p6_use_valgrind,
valgrind_log_filename=p6_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p6_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p6_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p6_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p6_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == 10:
CS.write_message(p6_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p6_result_file, 'Failed ' + str(10 - correct_test) + ' tests!')
if p6_open_results:
CS.open_file(p6_result_file, text_editor)
if p6_use_valgrind:
CS.open_file(p6_valgrind_file, text_editor)
# Clean up
if p6_remove_files:
CS.remove_file(p6_target)
os.system('rm -r q6_student_output')
if run_p7_test:
p7_result_file = 'got_result.txt'
p7_valgrind_file = 'got_valgrind.txt'
p7_target = 'game_of_pointers'
p7_test_count = 16
if CS.check_file_existence(p7_result_file):
CS.remove_file(p7_result_file)
if CS.check_file_existence(p7_valgrind_file):
CS.remove_file(p7_valgrind_file)
CS.compile_student_code(0,
source_files=p7_source_files,
target=p7_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q7_student_output')
correct_test = 0
for i in xrange(1,p7_test_count + 1):
parameters = 'q7_input/input' + str(i) + '.txt'
parameters += ' q7_student_output/output' + str(i) +'.txt'
output_file = 'q7_student_output/output' + str(i) +'.txt'
expected_output_file = 'q7_input/solution' + str(i) + '.txt'
CS.run_executable('./',
p7_target,
parameters,
use_valgrind=p7_use_valgrind,
valgrind_log_filename=p7_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p7_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p7_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p7_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p7_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == p7_test_count:
CS.write_message(p7_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p7_result_file, 'Failed ' + str(p7_test_count - correct_test) + ' tests!')
    if p7_open_results:
        CS.open_file(p7_result_file, text_editor)
        if p7_use_valgrind:
            CS.open_file(p7_valgrind_file, text_editor)
# Clean up
if p7_remove_files:
CS.remove_file(p7_target)
os.system('rm -r q7_student_output')
|
py | b40dd060ed8aa052fe5924acd8efd36f18307830 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Generic sentence handling tools: hopefully reusable.
"""
from typing import Set
class _BaseSentence:
"""
A base sentence class for a particular protocol.
Using this base class, specific sentence classes can almost automatically
be created for a particular protocol.
To do this, fill the ALLOWED_ATTRIBUTES class attribute using
the C{getSentenceAttributes} class method of the producer::
class FooSentence(BaseSentence):
\"\"\"
            A sentence for intergalactic transmodulator sentences.
@ivar transmogrificationConstant: The value used in the
transmogrifier while producing this sentence, corrected for
gravitational fields.
@type transmogrificationConstant: C{Tummy}
\"\"\"
ALLOWED_ATTRIBUTES = FooProtocol.getSentenceAttributes()
@ivar presentAttributes: An iterable containing the names of the
attributes that are present in this sentence.
@type presentAttributes: iterable of C{str}
@cvar ALLOWED_ATTRIBUTES: A set of attributes that are allowed in this
sentence.
@type ALLOWED_ATTRIBUTES: C{set} of C{str}
"""
ALLOWED_ATTRIBUTES: Set[str] = set()
def __init__(self, sentenceData):
"""
Initializes a sentence with parsed sentence data.
@param sentenceData: The parsed sentence data.
@type sentenceData: C{dict} (C{str} -> C{str} or L{None})
"""
self._sentenceData = sentenceData
@property
def presentAttributes(self):
"""
An iterable containing the names of the attributes that are present in
this sentence.
@return: The iterable of names of present attributes.
@rtype: iterable of C{str}
"""
return iter(self._sentenceData)
def __getattr__(self, name):
"""
Gets an attribute of this sentence.
"""
if name in self.ALLOWED_ATTRIBUTES:
return self._sentenceData.get(name, None)
else:
className = self.__class__.__name__
msg = f"{className} sentences have no {name} attributes"
raise AttributeError(msg)
def __repr__(self) -> str:
"""
Returns a textual representation of this sentence.
@return: A textual representation of this sentence.
@rtype: C{str}
"""
items = self._sentenceData.items()
data = [f"{k}: {v}" for k, v in sorted(items) if k != "type"]
dataRepr = ", ".join(data)
typeRepr = self._sentenceData.get("type") or "unknown type"
className = self.__class__.__name__
return f"<{className} ({typeRepr}) {{{dataRepr}}}>"
class _PositioningSentenceProducerMixin:
"""
A mixin for certain protocols that produce positioning sentences.
This mixin helps protocols that store the layout of sentences that they
consume in a C{_SENTENCE_CONTENTS} class variable provide all sentence
attributes that can ever occur. It does this by providing a class method,
C{getSentenceAttributes}, which iterates over all sentence types and
collects the possible sentence attributes.
"""
@classmethod
def getSentenceAttributes(cls):
"""
Returns a set of all attributes that might be found in the sentences
produced by this protocol.
This is basically a set of all the attributes of all the sentences that
this protocol can produce.
@return: The set of all possible sentence attribute names.
@rtype: C{set} of C{str}
"""
attributes = {"type"}
for attributeList in cls._SENTENCE_CONTENTS.values():
for attribute in attributeList:
if attribute is None:
continue
attributes.add(attribute)
return attributes
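# --- Editor's illustrative sketch (not part of the original module): the shape
# of _SENTENCE_CONTENTS this mixin expects. The sentence and field names here
# are hypothetical; None entries mark unused fields and are skipped, while
# "type" is always included.
class _ExampleProducer(_PositioningSentenceProducerMixin):
    _SENTENCE_CONTENTS = {
        "EXA": ["latitude", "longitude", None],
        "EXB": ["altitude", None],
    }
# _ExampleProducer.getSentenceAttributes()
# == {"type", "latitude", "longitude", "altitude"}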
|
py | b40dd10087794fbba33d65ee26f35e616a7a37a4 | # -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import unittest
from itertools import product
from sdc.tests.indexes import (
TestEmptyIndex,
TestPositionalIndex,
TestRangeIndex,
TestInt64Index,
)
from sdc.tests.indexes.index_datagens import _generate_index_param_values, get_sample_index
from sdc.datatypes.indexes import *
class TestIndexes(
TestEmptyIndex,
TestPositionalIndex,
TestRangeIndex,
TestInt64Index
):
""" This suite combines tests from all concrete index-type suites and also adds
tests for common use-cases that need to be checked for all index-types. """
def assert_indexes_equal(self, index1, index2):
# for SDC indexes that are represented with arrays (e.g. Uint64Index)
supported_pandas_indexes = (pd.RangeIndex, pd.Int64Index, )
if (not isinstance(index1, supported_pandas_indexes)
or not isinstance(index2, supported_pandas_indexes)):
index1 = np.asarray(index1)
index2 = np.asarray(index2)
np.testing.assert_array_equal(index1, index2)
else:
pd.testing.assert_index_equal(index1, index2)
@unittest.skip("TODO: support boxing/unboxing and parent ref for Python ranges in Numba")
def test_indexes_unbox_data_id_check(self):
def test_impl(index):
return index
sdc_func = self.jit(test_impl)
n = 11
indexes_to_test = [
pd.RangeIndex(n, name='abc'), # only this one fails, other pass
pd.Int64Index(np.arange(n), name='abc'),
]
data_attr_names_map = {
pd.RangeIndex: '_range',
pd.Int64Index: '_data',
}
for index in indexes_to_test:
with self.subTest(index_type=type(index)):
result = sdc_func(index)
result_ref = test_impl(index)
data1, data2, data3 = map(
lambda x: getattr(x, data_attr_names_map[type(x)]),
[index, result, result_ref]
)
self.assertIs(data1, data3)
self.assertIs(data2, data3)
@unittest.skip("Needs writable native struct type members in Numba")
def test_indexes_named_set_name(self):
def test_impl(index):
index.name = 'def'
return index
sdc_func = self.jit(test_impl)
n = 11
indexes_to_test = [
pd.RangeIndex(n, name='abc'),
pd.Int64Index(np.arange(n), name='abc'),
]
for index in indexes_to_test:
with self.subTest(index_type=type(index)):
index1 = index.copy(deep=True)
index2 = index.copy(deep=True)
result = sdc_func(index1)
result_ref = test_impl(index2)
pd.testing.assert_index_equal(result, result_ref)
@unittest.skip("Needs writable native struct type members and single common type for name")
def test_indexes_unnamed_set_name(self):
def test_impl(index):
index.name = 'def'
return index
sdc_func = self.jit(test_impl)
n = 11
indexes_to_test = [
pd.RangeIndex(n),
pd.Int64Index(np.arange(n)),
]
for index in indexes_to_test:
with self.subTest(index_type=type(index)):
index1 = index.copy(deep=True)
index2 = index.copy(deep=True)
result = sdc_func(index1)
result_ref = test_impl(index2)
pd.testing.assert_index_equal(result, result_ref)
@unittest.skip("Need support unboxing pandas indexes with parent ref")
def test_indexes_operator_is_unbox(self):
def test_impl(index1, index2):
return index1 is index2
sdc_func = self.jit(test_impl)
indexes_to_test = [
pd.RangeIndex(1, 21, 3),
pd.Int64Index([1, 2, 3, 5, 6, 3, 4]),
]
for index in indexes_to_test:
# positive testcase
with self.subTest(subtest="same indexes"):
index1 = index.copy(deep=True)
index2 = index1
result = sdc_func(index1, index2)
result_ref = test_impl(index1, index2)
self.assertEqual(result, result_ref)
self.assertEqual(result, True)
# negative testcase
with self.subTest(subtest="not same indexes"):
index1 = index.copy(deep=True)
index2 = index.copy(deep=True)
result = sdc_func(index1, index2)
result_ref = test_impl(index1, index2)
self.assertEqual(result, result_ref)
self.assertEqual(result, False)
def test_indexes_unbox_series_with_index(self):
@self.jit
def test_impl(S):
# TO-DO: this actually includes calling 'index' attribute overload, should really be S._index,
# but this requires separate type (e.g. PositionalIndexType) instead of types.none as default index
return S.index
n = 11
for index in _generate_index_param_values(n):
expected_res = pd.RangeIndex(n) if index is None else index
with self.subTest(series_index=index):
S = pd.Series(np.ones(n), index=index)
result = test_impl(S)
self.assert_indexes_equal(result, expected_res)
def test_indexes_create_series_with_index(self):
@self.jit
def test_impl(data, index):
S = pd.Series(data=data, index=index)
return S.index
n = 11
series_data = np.ones(n)
for index in _generate_index_param_values(n):
expected_res = pd.RangeIndex(n) if index is None else index
with self.subTest(series_index=index):
result = test_impl(series_data, index)
self.assert_indexes_equal(result, expected_res)
def test_indexes_box_series_with_index(self):
def test_impl(data, index):
return pd.Series(data=data, index=index)
sdc_func = self.jit(test_impl)
n = 11
series_data = np.ones(n)
for index in _generate_index_param_values(n):
with self.subTest(series_index=index):
result = sdc_func(series_data, index)
result_ref = test_impl(series_data, index)
pd.testing.assert_series_equal(result, result_ref)
def test_indexes_get_series_index(self):
def test_impl(S):
return S.index
sdc_func = self.jit(test_impl)
n = 11
for index in _generate_index_param_values(n):
with self.subTest(series_index=index):
S = pd.Series(np.ones(n), index=index)
result = sdc_func(S)
result_ref = test_impl(S)
self.assert_indexes_equal(result, result_ref)
def test_indexes_unbox_df_with_index(self):
@self.jit
def test_impl(df):
# TO-DO: this actually includes calling 'index' attribute overload, should really be df._index,
# but this requires separate type (e.g. PositionalIndexType) instead of types.none as default index
return df.index
n = 11
for index in _generate_index_param_values(n):
expected_res = pd.RangeIndex(n) if index is None else index
with self.subTest(df_index=index):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)}, index=index)
result = test_impl(df)
self.assert_indexes_equal(result, expected_res)
def test_indexes_create_df_with_index(self):
@self.jit
def test_impl(A, B, index):
df = pd.DataFrame({'A': A, 'B': B}, index=index)
return df.index
n = 11
A, B = np.ones(n), np.arange(n)
for index in _generate_index_param_values(n):
expected_res = pd.RangeIndex(n) if index is None else index
with self.subTest(df_index=index):
result = test_impl(A, B, index)
self.assert_indexes_equal(result, expected_res)
def test_indexes_box_df_with_index(self):
def test_impl(A, B, index):
return pd.DataFrame({'A': A, 'B': B}, index=index)
sdc_func = self.jit(test_impl)
n = 11
A, B = np.ones(n), np.arange(n, dtype=np.intp)
for index in _generate_index_param_values(n):
with self.subTest(df_index=index):
result = sdc_func(A, B, index)
result_ref = test_impl(A, B, index)
pd.testing.assert_frame_equal(result, result_ref)
def test_indexes_get_df_index(self):
def test_impl(df):
return df.index
sdc_func = self.jit(test_impl)
n = 11
for index in _generate_index_param_values(n):
with self.subTest(df_index=index):
df = pd.DataFrame({'A': np.ones(n)}, index=index)
result = sdc_func(df)
result_ref = test_impl(df)
self.assert_indexes_equal(result, result_ref)
def test_indexes_support_numpy_like_take_by(self):
""" Verifies numpy_like.take can handle SDC index types as indices """
from sdc.functions import numpy_like
def pyfunc(arr, index):
return np.take(arr, index)
@self.jit
def sdc_func(arr, index):
return numpy_like.take(arr, index)
n, k = 1000, 200
np.random.seed(0)
arr = np.arange(n) * 2
indexes_to_test = [
get_sample_index(k, PositionalIndexType),
get_sample_index(k, RangeIndexType),
get_sample_index(k, Int64IndexType),
]
for index in indexes_to_test:
with self.subTest(index=index):
result = sdc_func(arr, index)
result_ref = pyfunc(arr, index)
np.testing.assert_array_equal(result, result_ref)
def test_indexes_support_series_operator_add(self):
def test_impl(data, index1, index2):
S1 = pd.Series(data, index=index1)
S2 = pd.Series(2 * data + 1, index=index2)
return S1 + S2
sdc_func = self.jit(test_impl)
n = 11
series_data = np.arange(n, dtype=np.float64)
index_params_to_test = [
None,
pd.RangeIndex(0, -n, -1),
pd.Int64Index(np.arange(n) * 2),
]
for index1, index2 in product(index_params_to_test, repeat=2):
with self.subTest(index1=index1, index2=index2):
result = sdc_func(series_data, index1, index2)
result_ref = test_impl(series_data, index1, index2)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
def test_indexes_support_series_operator_lt(self):
def test_impl(data, index1, index2):
S1 = pd.Series(data, index=index1)
S2 = pd.Series(2 * data + 1, index=index2)
return S1 < S2
sdc_func = self.jit(test_impl)
n = 11
series_data = np.arange(n, dtype=np.float64)
index_params_to_test = [
None,
pd.RangeIndex(0, -n, -1),
pd.Int64Index(np.arange(n) * 2),
]
for index1 in index_params_to_test:
index2 = index1
with self.subTest(index1=index1, index2=index2):
result = sdc_func(series_data, index1, index2)
result_ref = test_impl(series_data, index1, index2)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
def test_indexes_support_series_reindexing(self):
from sdc.datatypes.common_functions import sdc_reindex_series
def pyfunc(data, index, name, by_index):
S = pd.Series(data, index, name=name)
return S.reindex(by_index)
@self.jit
def sdc_func(data, index, name, by_index):
return sdc_reindex_series(data, index, name, by_index)
n = 17
np.random.seed(0)
mask = np.random.choice([True, False], n)
name = 'asdf'
range_index = pd.RangeIndex(n)
int64_index = pd.Int64Index(np.random.choice(range_index.values, n, replace=False))
indexes_combinations = [
(range_index, range_index),
(range_index, range_index[::-1]),
(range_index[::-1], range_index),
(range_index, int64_index),
(int64_index, range_index),
]
for index1, index2 in indexes_combinations:
with self.subTest(index1=index1, index2=index2):
result = sdc_func(mask, index1, name, index2)
result_ref = pyfunc(mask, index1, name, index2)
pd.testing.assert_series_equal(result, result_ref)
if __name__ == "__main__":
unittest.main()
|
py | b40dd11572c10690ca6a28b1bad5489fb41fb194 | """
Copyright (C) 2008, 2009 - Luke Kenneth Casson Leighton <[email protected]>
"""
from pyjamas import DOM
from pyjamas.ui import Event
class ClickHandler(object):
def __init__(self, preventDefault=False):
self._clickListeners = []
self._doubleclickListeners = []
self._clickPreventDefault = preventDefault
self.sinkEvents(Event.ONCLICK)
self.sinkEvents(Event.ONDBLCLICK)
def onClick(self, sender=None):
pass
def onDoubleClick(self, sender=None):
pass
def addDoubleClickListener(self, listener):
self._doubleclickListeners.append(listener)
def addClickListener(self, listener):
stylename = self.getStyleName()
if stylename:
self.addStyleName("%s-clickable" % stylename.split()[0])
self._clickListeners.append(listener)
def onBrowserEvent(self, event):
"""Listen to events raised by the browser and call the appropriate
method of the listener (widget, ..) object.
"""
type = DOM.eventGetType(event)
if type == "click":
if self._clickPreventDefault:
DOM.eventPreventDefault(event)
for listener in self._clickListeners:
if hasattr(listener, "onClick"):
listener.onClick(self)
else:
listener(self)
elif type == "dblclick":
if self._clickPreventDefault:
DOM.eventPreventDefault(event)
for listener in self._doubleclickListeners:
if hasattr(listener, "onDoubleClick"):
listener.onDoubleClick(self)
else:
listener(self)
def removeClickListener(self, listener):
self._clickListeners.remove(listener)
def removeDoubleClickListener(self, listener):
self._doubleclickListeners.remove(listener)
def clearClickListener(self):
self._clickListeners[:] = []
def clearDoubleClickListener(self):
        self._doubleclickListeners[:] = []
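# --- Editor's illustrative sketch (not part of the original module):
# ClickHandler is meant to be mixed into a pyjamas widget, which supplies
# sinkEvents()/getStyleName()/addStyleName(). The stand-in widget below is
# hypothetical and exists only to show how listeners are registered.
class _ExampleClickableWidget(ClickHandler):
    def sinkEvents(self, events):
        pass  # a real widget subscribes to the given browser events here
    def getStyleName(self):
        return ""
# widget = _ExampleClickableWidget()
# widget.addClickListener(lambda sender: None)  # plain callables are accepted
# widget.addClickListener(obj_with_onClick)     # or objects defining onClick()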
|
py | b40dd1e1f4d0a6d6b32be482c1d239b123be8d3d | import atnp.categorization as categorization
import atnp.preprocessor as preprocessor
import atnp.visualizer as visualizer
import atnp.vectorizer as vectorizer
import atnp.sentiment as sentiment
import atnp.clusters as clusters
import matplotlib.pyplot as plt
import atnp.topics as topics
import atnp.utils as utils
import pandas as pd
import imgkit
import os
from sklearn.metrics import silhouette_samples, silhouette_score, davies_bouldin_score
from atnp.corpus import TokenizedCorpusReader
from contextlib import redirect_stdout
from collections import Counter
from joblib import dump, load
from spacy import displacy
class Analyser():
def __init__(self, path_corpus, path_pickle, path_docs, labels_title):
self.path_docs = path_docs
self.path_corpus = path_corpus
self.path_pickle = path_pickle
self.labels_title = labels_title
self.__init_corpus__()
def __init_corpus__(self):
utils.create_if_not_exists(self.path_docs)
self.corpus = TokenizedCorpusReader(
self.path_corpus, header=['fileid', 'subject', 'journal'])
self.sujects = self.corpus.categ.get_cats("subject")
self.journals = self.corpus.categ.get_cats("journal")
def __get_docs_path(self, path):
path = os.path.join(self.path_docs, path)
utils.create_if_not_exists(path)
return path
def __get_pickle_path(self, path):
path = os.path.join(self.path_pickle, path)
utils.create_if_not_exists(path)
return path
def summary(self):
journal_count = [self.corpus.categ.get(
fileid, "journal") for fileid in self.corpus.fileids()]
subject_count = [self.corpus.categ.get(
fileid, "subject") for fileid in self.corpus.fileids()]
print(dict(Counter(journal_count)))
print(dict(Counter(subject_count)))
def wordcloud_subjects(self):
base_path = self.__get_docs_path("wordcloud")
for subject in self.sujects:
print("Making wordcloud of %s" % subject)
corpus = self.corpus.apply((preprocessor.word_segmentation,
preprocessor.clean_tokens), categories=[subject])
wc = visualizer.word_cloud(corpus)
wc.to_file(os.path.join(base_path, subject + ".png"))
def get_tokens(self, mode):
namefile = "%s.pickle" % mode
path = self.__get_pickle_path(os.path.join("tokens"))
total_path = os.path.join(path, namefile)
if not os.path.exists(total_path):
corpus = None
if mode == "stem":
corpus = self.corpus.apply((preprocessor.word_segmentation,
preprocessor.clean_tokens,
preprocessor.remove_acentuation,
preprocessor.stemmer_doc))
elif mode == "not_stem":
corpus = self.corpus.apply((preprocessor.word_segmentation,
preprocessor.clean_tokens,
preprocessor.remove_acentuation))
elif mode == "mwe":
corpus = self.corpus.apply((preprocessor.tokenize_mwe,
preprocessor.clean_tokens,
preprocessor.remove_acentuation))
utils.save_pickle(
path, namefile, [(list(doc), fileid) for doc, fileid in corpus])
return utils.get_pickle(total_path)
def gen_head_vectors(self):
base_path = self.__get_docs_path("corpusinfo")
corpus = self.get_tokens("stem")
docs, fileids = zip(*corpus)
for name, vector_fun in [("tf-idf", vectorizer.generate_tf_idf), ("frequency", vectorizer.generate_count)]:
print("Making head for %s" % name)
vectors, vectorizer_model = vector_fun(docs)
df = pd.DataFrame(vectors.toarray())
df.columns = vectorizer_model.get_feature_names()
df.insert(0, 'Document', fileids)
with open(os.path.join(base_path, "head-%s.txt" % name), "w") as file:
file.write(str(df.head()))
def reducers_with_tfidf(self):
base_path = self.__get_docs_path("reducers")
for mode in ["stem", "not_stem", "mwe"]:
corpus = self.get_tokens(mode)
self.__reducers_with_tfidf(corpus, os.path.join(base_path, mode))
def __reducers_with_tfidf(self, corpus, base_path):
utils.create_if_not_exists(base_path)
docs, fileids = zip(*corpus)
vectors, _ = vectorizer.generate_tf_idf(docs)
for label in ['subject', 'journal']:
y = [self.corpus.categ.get(fileid, label) for fileid in fileids]
for dimen in [2, 3]:
base_sub_section_path = os.path.join(base_path, "%dd" % dimen)
utils.create_if_not_exists(base_sub_section_path)
for name, function in [
('umap', visualizer.umap_word_visualize),
('t-sne', visualizer.t_sne_word_visualize),
('pca', visualizer.pca_word_visualizer)]:
print("Saving %s %s %s" % (label, dimen, name))
fig, _ = function(vectors, y, dimen)
fig.savefig(os.path.join(base_sub_section_path,
"%s-%s.png" % (label, name)))
plt.close(fig)
del y
del docs
del fileids
del vectors
def word2vec_personalities(self):
table = []
base_path = self.__get_docs_path(
os.path.join("word2vec"))
pickle_path = self.__get_pickle_path(os.path.join("word2vec"))
for theme, persons in [
('impeachment-dilma', ['dilma', 'lula', 'cunha', 'bolsonaro']),
('reforma-trabalhista', ['temer', 'maia', 'empresa', 'empregado']),
('afastamento-aecio', ['aecio', 'temer', 'jbs']),
('prisao-lula', ['lula', 'moro', ]),
('soltura-lula', ['lula', 'moro', 'stf', 'supremo']),
('reforma-previdencia', ['maia', 'guedes', "bolsonaro"])]:
model_path_name = os.path.join(
pickle_path, "%s-word2vec.model" % theme)
            model = None
            corpus = None  # keeps 'del corpus' below safe when the model is loaded from disk
if not os.path.exists(model_path_name):
print("Making model for %s" % theme)
corpus = self.corpus.apply(
(preprocessor.word_segmentation,
preprocessor.clean_tokens,
preprocessor.remove_acentuation), categories=[theme])
model = vectorizer.generate_word2vec(
[list(doc) for doc, fileid in corpus])
else:
print("Loading model %s" % theme)
model = vectorizer.load_word2vec(model_path_name)
df = pd.DataFrame(model.wv.vectors)
df.insert(0, 'Token', pd.Series(model.wv.index_to_key).values)
with open(os.path.join(base_path, "head-%s.txt" % theme), "w") as file:
file.write(str(df.head()))
x_vals, y_vals, labels = visualizer.reduce_dimensions_word2vec(
model)
fig, _ = visualizer.plot_word2vec_with_matplotlib(
x_vals, y_vals, labels, "Word2Vec %s" % self.labels_title[theme])
fig.savefig(os.path.join(base_path, "word2vec-%s.png" % (theme)))
plt.close(fig)
if not os.path.exists(model_path_name):
model.save(model_path_name)
for person in persons:
print("Making table of data %s to %s" % (theme, person))
x = [theme, person]
for words in model.wv.most_similar(person):
x.extend(list(words))
table.append(x)
del model
del corpus
utils.save_csv(base_path, "personalities", table)
def clusers_plot(self, type_model, function, tokens_type):
base_path = self.__get_docs_path(
os.path.join("clusters", type_model))
corpus = self.get_tokens("stem")
docs, fileids = zip(*corpus)
vectors, _ = vectorizer.generate_tf_idf(docs)
models = []
number_clusters = []
elbow_values = []
silhouette_values = []
db_values = []
init_n_cluster = 2
silhouette_path = os.path.join(base_path, "silhouette")
utils.create_if_not_exists(silhouette_path)
for i in range(init_n_cluster, 15):
number_clusters.append(i)
kmeans = function(vectors, i)
models.append(kmeans)
silhouette_avg = silhouette_score(
vectors.toarray(), kmeans.labels_)
sample_silhouette_values = silhouette_samples(
vectors.toarray(), kmeans.labels_)
db = davies_bouldin_score(vectors.toarray(), kmeans.labels_)
elbow_values.append(kmeans.inertia_)
silhouette_values.append(silhouette_avg)
db_values.append(db)
fig_silhouette, _ = visualizer.silhouette_cluster_plot(
vectors,
kmeans.labels_,
kmeans.cluster_centers_,
i,
silhouette_avg,
sample_silhouette_values)
fig_silhouette.savefig(os.path.join(
silhouette_path, "%d_clusters.png" % i))
plt.close(fig_silhouette)
print("Saving silhouette of cluster %d" % i)
choose_cluster_elbow = clusters.ellbow_optimal_number_of_clusters(
elbow_values)
ex_fig_silhouette_score, _ = visualizer.satter_graph_metrics(
silhouette_values, number_clusters, "Silhouette Score")
ex_fig_silhouette_score.show()
        choose_cluster_sihlouette = int(input("silhouette\n"))
plt.close(ex_fig_silhouette_score)
ex_fig_db, _ = visualizer.satter_graph_metrics(
db_values, number_clusters, "Davies-Bouldin Score")
ex_fig_db.show()
choose_cluster_db = int(input("davies bouldin\n"))
plt.close(ex_fig_db)
print("best elbow cluster is %d" % choose_cluster_elbow)
print("best sihouette cluster is %d" % choose_cluster_sihlouette)
print("best db cluster is %d" % choose_cluster_db)
x_label = "Número de clusters"
fig_elbow_score, _ = visualizer.satter_graph_metrics(
elbow_values, number_clusters,
"Método Elbow %s" % type_model,
contrast=[number_clusters.index(choose_cluster_elbow)],
y_label="Variação Explicada",
x_label=x_label
)
fig_elbow_score.savefig(os.path.join(base_path, "elbow_score.png"))
plt.close(fig_elbow_score)
print("saving elbow score")
fig_silhouette_score, _ = visualizer.satter_graph_metrics(
silhouette_values,
number_clusters, "Método Silhueta %s" % type_model,
contrast=[number_clusters.index(choose_cluster_sihlouette)],
y_label="Silhueta",
x_label=x_label
)
fig_silhouette_score.savefig(os.path.join(
base_path, "silhouette_score.png"))
plt.close(fig_silhouette_score)
print("saving sihouette score")
fig_db, _ = visualizer.satter_graph_metrics(
db_values, number_clusters,
"Método Davies-Bouldin %s" % type_model,
contrast=[number_clusters.index(choose_cluster_db)],
y_label="Davies-Bouldin",
x_label=x_label
)
fig_db.savefig(os.path.join(base_path, "db_score.png"))
plt.close(fig_db)
print("saving db score")
chosed_clusters = list(set([choose_cluster_elbow,
choose_cluster_sihlouette, choose_cluster_db]))
labels = {"journal": [], "subject": []}
for label in ['journal', 'subject']:
y = [self.corpus.categ.get(fileid, label) for fileid in fileids]
labels[label] = y
for clst in chosed_clusters:
model = models[number_clusters.index(clst)]
fig, _ = visualizer.kmeans_visualizer(
vectors, y, model.labels_, model.cluster_centers_, "Scatter %s %d Clusters por %s" % (type_model, clst, self.labels_title[label]))
fig.savefig(os.path.join(base_path, "%s-%d" % (label, clst)))
print("saving labeled %s cluster %d" % (label, clst))
del y
pickle_path = self.__get_pickle_path(
os.path.join("clusters", type_model))
for clst in chosed_clusters:
model = models[number_clusters.index(clst)]
df = pd.DataFrame({
"fileid": fileids,
"cluster": model.labels_,
"journal": labels["journal"],
"subject": labels["subject"]})
df.to_csv(os.path.join(
base_path, "result-model-cluster-%d.csv" % clst))
for label in ['journal', 'subject']:
gr = df.groupby(["cluster", label]).size()
gr.to_csv(
(os.path.join(base_path, "group-by-%s=model-cluster-%d.csv" % (label, clst))))
utils.save_joblib(pickle_path, "df-model-cluster-%d" % clst, df)
utils.save_joblib(
pickle_path, "model-cluster-%s.joblib" % clst, model)
def types_kmeans(self, tokens_type="stem"):
self.clusers_plot("kmeans", clusters.generate_kmeans, tokens_type)
self.clusers_plot("mini-batch-kmeans",
clusters.generate_mini_bath_kmeans, tokens_type)
def mean_shift_plot(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("clusters", "mean-shift"))
corpus = self.get_tokens(tokens_type)
docs, fileids = zip(*corpus)
vectors, _ = vectorizer.generate_tf_idf(docs)
mn = clusters.generate_mean_shift(vectors.toarray())
fig, _ = visualizer.cluster_docs_plot(
vectors, mn.labels_, mn.cluster_centers_)
fig.savefig(os.path.join(base_path, "cluser-plot.png"))
plt.close(fig)
labels = {"journal": [], "subject": []}
for label in ['journal', 'subject']:
y = [self.corpus.categ.get(fileid, label) for fileid in fileids]
labels[label] = y
fig, _ = visualizer.kmeans_visualizer(
vectors, y, mn.labels_, mn.cluster_centers_, "Scatter Mean Shift por %s" % label)
fig.savefig(os.path.join(base_path, "%s.png" % label))
plt.close(fig)
del y
print("saving labeled %s cluster" % label)
pickle_path = self.__get_pickle_path(
os.path.join("clusters", "mean-shift"))
df = pd.DataFrame({
"fileid": fileids,
"cluster": mn.labels_,
"journal": labels["journal"],
"subject": labels["subject"]})
df.to_csv(os.path.join(
base_path, "result-model-cluster-mean-shift.csv"))
for label in ['journal', 'subject']:
gr = df.groupby(["cluster", label]).size()
gr.to_csv(
(os.path.join(base_path, "group-by-%s-model-cluster-mean-shift.csv" % (label))))
utils.save_joblib(pickle_path, "df-model-cluster-mean-shift", df)
utils.save_joblib(
pickle_path, "model-cluster-mean-shift.joblib", mn)
def afinity_plot(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("clusters", "afinity"))
corpus = self.get_tokens(tokens_type)
docs, fileids = zip(*corpus)
vectors, _ = vectorizer.generate_tf_idf(docs)
af = clusters.generate_affinity_propagation(vectors.toarray())
fig, _ = visualizer.cluster_docs_plot(
vectors, af.labels_, af.cluster_centers_)
fig.savefig(os.path.join(base_path, "cluser-plot.png"))
plt.close(fig)
labels = {"journal": [], "subject": []}
for label in ['journal', 'subject']:
y = [self.corpus.categ.get(fileid, label) for fileid in fileids]
labels[label] = y
fig, _ = visualizer.kmeans_visualizer(
vectors, y, af.labels_, af.cluster_centers_, "Scatter Afinity por %s" % label)
fig.savefig(os.path.join(base_path, "%s.png" % label))
plt.close(fig)
del y
print("saving labeled %s cluster" % label)
pickle_path = self.__get_pickle_path(
os.path.join("clusters", "afinity"))
df = pd.DataFrame({
"fileid": fileids,
"cluster": af.labels_,
"journal": labels["journal"],
"subject": labels["subject"]})
df.to_csv(os.path.join(
base_path, "result-model-cluster-afinity.csv"))
for label in ['journal', 'subject']:
gr = df.groupby(["cluster", label]).size()
gr.to_csv(
(os.path.join(base_path, "group-by-%s-model-cluster-afinity.csv" % (label))))
utils.save_joblib(pickle_path, "df-model-cluster-afinity", df)
utils.save_joblib(
pickle_path, "model-cluster-afinity.joblib", af)
def agglomerative_plot(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("clusters", "agglomerative"))
corpus = self.get_tokens("stem")
docs, fileids = zip(*corpus)
vectors, _ = vectorizer.generate_tf_idf(docs)
pickle_path = self.__get_pickle_path(
os.path.join("clusters", "agglomerative"))
for algorithm in ['ward', 'complete', 'average', 'single']:
ag = clusters.generate_agglomerative(vectors.toarray(), algorithm)
y = [self.corpus.categ.get(fileid, "subject")
for fileid in fileids]
utils.save_joblib(
pickle_path, "model-cluster-agglomerative-%s.joblib" % algorithm, ag)
fig, _ = visualizer.plot_dendrogram(ag, y)
fig.savefig(os.path.join(
base_path, "%s-dendogram.png" % algorithm))
print("saving dendogram %s cluster" % algorithm)
def default_categ_results(self, base_path, y_test, y_pred, name, theme, title=""):
cfm, clr, ac = categorization.evaluating(y_test, y_pred)
print(cfm, clr, ac)
fig, _ = visualizer.plot_confusion_matrix(cfm, y_test, title)
fig.savefig(os.path.join(base_path, "confusion-matix-%s-%s.png" %
(name, theme)))
plt.close(fig)
with open(os.path.join(base_path, "report-%s-%s.txt" % (name, theme)), "w") as file:
file.write(clr)
file.write("\naccuracy score: %s" % str(ac))
def analysis_nb_categ(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("categorization", "nb"))
pickle_path = self.__get_pickle_path(
os.path.join("categorization", "nb"))
tokens = self.get_tokens(tokens_type)
docs, fileids = zip(*tokens)
vectors, _ = vectorizer.generate_tf_idf(docs)
for theme in ['subject']:
y = [self.corpus.categ.get(fileid, theme) for fileid in fileids]
for name, alg in [("Gaussian ", categorization.generate_gaussian_nb),
("Multinomial", categorization.generate_multinomial_nb),
("Complement", categorization.generate_complement_nb),
("Bernoulli", categorization.generate_bernoulli_nb),
("Categorical", categorization.generate_categorical_nb)
]:
model, (y_test, y_pred) = alg(vectors.toarray(), y)
utils.save_joblib(
pickle_path, "model-nb-%s-%s.joblib" % (theme, name), model)
self.default_categ_results(
base_path, y_test, y_pred, name, theme, "Heatmap %s Naive Bayes por %s" % (name, self.labels_title[theme]))
def analysis_svm_categ(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("categorization", "svm"))
pickle_path = self.__get_pickle_path(
os.path.join("categorization", "svm"))
tokens = self.get_tokens(tokens_type)
docs, fileids = zip(*tokens)
X, _ = vectorizer.generate_tf_idf(docs)
for theme in ['subject']:
y = [self.corpus.categ.get(fileid, theme) for fileid in fileids]
for kernel in ["linear", "poly", "rbf", "sigmoid"]:
model, (y_test, y_pred) = categorization.generate_svm(
X.toarray(), y, kernel)
utils.save_joblib(
pickle_path, "model-svm-%s-%s.joblib" % (theme, kernel), model)
self.default_categ_results(
base_path, y_test, y_pred, kernel, theme, "Heatmap Svm kernel %s por %s" % (kernel, self.labels_title[theme]))
def analysis_tree(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("categorization", "tree"))
pickle_path = self.__get_pickle_path(
os.path.join("categorization", "tree"))
tokens = self.get_tokens(tokens_type)
docs, fileids = zip(*tokens)
X, _ = vectorizer.generate_tf_idf(docs)
for theme in ['subject']:
y = [self.corpus.categ.get(fileid, theme) for fileid in fileids]
model, (y_test, y_pred) = categorization.generate_tree(
X.toarray(), y)
utils.save_joblib(
pickle_path, "model-tree-%s.joblib" % theme, model)
self.default_categ_results(
base_path, y_test, y_pred, "tree", theme, "Heatmap Tree por %s" % self.labels_title[theme])
def analysis_nns(self, tokens_type="stem"):
base_path = self.__get_docs_path(
os.path.join("categorization", "nns"))
pickle_path = self.__get_pickle_path(
os.path.join("categorization", "nns"))
tokens = self.get_tokens(tokens_type)
docs, fileids = zip(*tokens)
X, _ = vectorizer.generate_tf_idf(docs)
for theme in ['subject']:
for num_layers in [50, 150]:
for num_hidden in [0, 2, 5]:
y = [self.corpus.categ.get(fileid, theme)
for fileid in fileids]
model, hist, (y_test, y_pred) = categorization.generate_nn(
X.toarray(),
y,
num_layers=num_layers,
num_hidden_layers=num_hidden)
name = "(%d)_dense-(%d)_hidden" % (num_layers, num_hidden)
utils.create_if_not_exists(pickle_path)
model.save(os.path.join(
pickle_path, "model-nn-%s-%s.h5" % (theme, name)))
self.default_categ_results(
                        base_path, y_test, y_pred, name, theme, "Heatmap Nn %s por %s" % (name, self.labels_title[theme]))
with open(os.path.join(base_path, "%s-%s.txt" % (theme, name)), "w") as file:
with redirect_stdout(file):
model.summary()
def make_lsi(self, tokens, num_topics, type_transformation):
return topics.generate_lsi_model(
tokens, num_topics=num_topics, type_transformation=type_transformation)
def make_lda(self, tokens, num_topics, type_transformation):
return topics.generate_lda_model(
tokens, num_topics=num_topics, type_transformation=type_transformation)
def report_topics(self, model, base_path, theme, type_transformation, num_topics):
with open(os.path.join(base_path,
"report-lda-%s-%d-topics-%s.txt" % (type_transformation, num_topics, theme)), "w") as f:
for topic_id, topic in model.print_topics(num_topics=num_topics, num_words=20):
f.write('Topic #'+str(topic_id+1)+':')
f.write(topic + "\n")
def analysis_topic_func(self, base_path, pickle_path, docs, func, theme, type_transformation, name, **kwargs):
range_start = kwargs["range_start"] if "range_start" in kwargs else 2
range_end = kwargs["range_end"] if "range_end" in kwargs else 20
step = kwargs["step"] if "step" in kwargs else 2
models = []
dictionaries = []
tcorpuses = []
coherence_models = []
coherence_scores = []
topics_available = list(range(range_start, range_end, step))
for num_topics in topics_available:
print("Making %d tranformation %s for theme %s" %
(num_topics, type_transformation, theme))
model, dictionary, transformed_corpus = func(
docs, num_topics, type_transformation)
coeherence_model = topics.generate_coeherence_model(
model, transformed_corpus, docs, dictionary)
coherence_scores.append(coeherence_model.get_coherence())
models.append(model)
coherence_models.append(coeherence_model)
dictionaries.append(dictionary)
tcorpuses.append(transformed_corpus)
fig, _ = visualizer.coeherence_score(
            range_start, range_end, step, coherence_scores, "Coherence for %s %s %s" % (name, type_transformation, theme))
fig.savefig(os.path.join(
base_path, "coeherence-%s-%s.png" % (type_transformation, theme)))
fig.show()
choose_num_topic = int(input("Num Topic\n"))
plt.close(fig)
calculated_index_topic = int((choose_num_topic - range_start)/step)
choosed_model = models[calculated_index_topic]
coherence_model = coherence_models[calculated_index_topic]
dictionary = dictionaries[calculated_index_topic]
tcorpus = tcorpuses[calculated_index_topic]
coherence_score = coherence_scores[calculated_index_topic]
with open(os.path.join(base_path, "coeherence-topics-%s-%s-%s.txt" % (name, theme, type_transformation)), "w") as f:
f.write(str(coherence_score))
utils.create_if_not_exists(pickle_path)
choosed_model.save(os.path.join(
pickle_path, "model-%s-%s-%s.model" % (name, theme, type_transformation)))
coherence_model.save(os.path.join(
pickle_path, "coherence-model-%s-%s-%s.model" % (name, theme, type_transformation)))
dictionary.save(os.path.join(
pickle_path, "dictionary-%s-%s-%s.dict" % (name, theme, type_transformation)))
tcorpus.save(os.path.join(pickle_path, "tcorpus-%s-%s-%s.model" %
(name, theme, type_transformation)))
calculated_num_topics = topics_available[calculated_index_topic]
print("Calculated num topic available is %d" %
calculated_num_topics)
self.report_topics(choosed_model, base_path, theme,
type_transformation, calculated_num_topics)
del models
del coherence_scores
del docs
def analysis_topic(self, func, name, transformations=["tf-idf", "bow"]):
base_path = self.__get_docs_path(os.path.join("topics", name))
pickle_path = self.__get_pickle_path(
os.path.join("topics", name))
tokens = self.get_tokens("mwe")
docs, _ = zip(*tokens)
range_start = 4
range_end = 6
step = 2
for type_transformation in transformations:
self.analysis_topic_func(base_path, pickle_path, docs, func, "all", type_transformation, name,
range_start=range_start, range_end=range_end, step=step)
for theme in self.sujects:
print("making model for subject %s" % theme)
corpus = self.corpus.apply((preprocessor.tokenize_mwe,
preprocessor.clean_tokens,
preprocessor.remove_acentuation), categories=[theme])
self.analysis_topic_func(base_path, pickle_path, [list(doc) for doc, fileid in corpus], func, theme, type_transformation, name,
range_start=range_start, range_end=range_end, step=step)
del corpus
def analysis_lda(self, transformations=["tf-idf"]):
self.analysis_topic(self.make_lda, "lda", transformations)
def analysis_lsi(self, transformations=["tf-idf"]):
self.analysis_topic(self.make_lsi, "lsi", transformations)
def semantic(self):
base_path = self.__get_docs_path("semantic")
nlp = preprocessor.SpacyInstance.get_normal_instance()
sentences = ""
pos_sentences = ""
for theme in self.sujects:
print("Making Semantic for %s" % theme)
document = next(self.corpus.documents(categories=[theme]))
ndoc = nlp(document[0])
ehtml = displacy.render(ndoc, style='ent')
meta = "<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'>"
ehtml = "%s%s" % (meta, ehtml)
imgkit.from_string(ehtml, os.path.join(
base_path, "%s-random_entity-%s.jpg" % (theme, document[1])), options={"encoding": "UTF-8"})
sent = list(ndoc.sents)[0]
dephtml = displacy.render(
sent, style="dep")
dephtml = "%s%s" % (meta, dephtml)
sentences += "%s%s" % (str(sent), "\n")
pos_sentences += "%s%s" % (str([(w, w.pos_) for w in sent]), "\n")
imgkit.from_string(dephtml, os.path.join(
base_path, "%s-dept-%s.jpg" % (theme, document[1])), options={"encoding": "UTF-8"})
with open(os.path.join(base_path, "sentences.txt"), "w") as file:
file.write(sentences)
with open(os.path.join(base_path, "pos_sentences.txt"), "w") as file:
file.write(pos_sentences)
def analysis_sentiment(self):
base_path = self.__get_docs_path("sentiment")
pickle_path = self.__get_pickle_path(
os.path.join("sentiment"))
tokens = self.get_tokens("mwe")
sent_corpus = []
count = 0
df = None
df_path = os.path.join(pickle_path, "df-sentiment")
if not os.path.exists(df_path):
for doc, fileid in tokens:
count = count + 1
print("fileid:", count)
print((count/1706) * 100, "%")
opl_cnt = Counter(sentiment.categs_pol_oplexicon_document(doc))
sent_cnt = Counter(sentiment.categs_pol_sentilex_document(doc))
sent_corpus.append((
fileid,
self.corpus.categ.get(fileid, 'subject'),
self.corpus.categ.get(fileid, 'journal'),
sent_cnt[None],
sent_cnt[-1],
sent_cnt[1],
sent_cnt[0],
opl_cnt[None],
opl_cnt[-1],
opl_cnt[1],
opl_cnt[0]
))
df = pd.DataFrame(sent_corpus, columns=[
'fileid',
'subject',
'journal',
'sent_none',
'sent_neg',
'sent_pos',
'sent_neut',
'opl_none',
'opl_neg',
'opl_pos',
'opl_neut'
])
df['polarity_oplexicon'] = df['opl_pos'] - df['opl_neg']
df['polarity_sentilex'] = df['sent_pos'] - df['sent_neg']
df.to_pickle(os.path.join(pickle_path, "df-sentiment"))
else:
print("Loading sentiment df")
df = pd.read_pickle(df_path)
# df.to_csv(os.path.join(base_path, "df-sentiments.csv"))
dfopl = df[['subject', 'journal', 'polarity_oplexicon',
'opl_neg', 'opl_pos', 'opl_neut', 'opl_none']]
dfsent = df[['subject', 'journal', 'polarity_sentilex',
'sent_neg', 'sent_pos', 'sent_neut', 'sent_none']]
for theme in ['subject', 'journal']:
gdfopl = dfopl.groupby(theme).sum()
gdfsent = dfsent.groupby(theme).sum()
gdfopl.to_csv(os.path.join(
base_path, 'opl-groupby-%s.csv' % theme))
gdfsent.to_csv(os.path.join(
base_path, 'sent-groupby-%s.csv' % theme))
fig, _ = visualizer.plot_compare_bar(
('Oplexicon', list(gdfopl['polarity_oplexicon'].values)),
('Sentilex', list(gdfsent['polarity_sentilex'].values)),
gdfopl['polarity_oplexicon'].to_dict().keys(), "Comparativo de polaridade por lexicos e por %s" % self.labels_title[theme])
fig.savefig(os.path.join(
base_path, "compare_bar_plot_%s.png" % theme))
fig, _ = visualizer.plot_compare_bar(
('Oplexicon', list(gdfopl['opl_none'].values)),
('Sentilex', list(gdfsent['sent_none'].values)),
gdfsent['sent_none'].to_dict().keys(), "Comparativo de perda por lexicos e por %s" % self.labels_title[theme], "Quantidade")
fig.savefig(os.path.join(
base_path, "missing_compare_bar_plot_%s.png" % theme))
            for name, _gdf in [('Oplexicon', gdfopl), ('Sentilex', gdfsent)]:
gdf = _gdf.copy()
gdf.columns = ['pol', 'neg', 'pos', 'neut', 'none']
tgdf = gdf.transpose()
result = {key: [value['neg'], value['neut'], value['pos']]
for key, value in tgdf.to_dict().items()}
fig, _ = visualizer.compare_survey(
result, ['Negativo', 'Neutro', 'Positivo'], "Distribuição Polaridade %s por %s" % (name, theme))
fig.savefig(os.path.join(
base_path, "comapre_dist_bar_plot_%s_%s.png" % (theme, name)))
|
py | b40dd279541757285a5f13c596ae79355386efd8 | #
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions available in SMV's Python shell
"""
from inspect import formatargspec, getargspec
import sys
from smv import SmvApp
from test_support.test_runner import SmvTestRunner
from test_support.testconfig import TestConfig
from pyspark.sql import DataFrame
def _jvmShellCmd():
return SmvApp.getInstance()._jvm.org.tresamigos.smv.shell.ShellCmd
def df(name, forceRun=False, version=None, runConfig=None, quickRun=False):
"""The DataFrame result of running the named module
Args:
name (str): The unique name of a module. Does not have to be the FQN.
forceRun (bool): True if the module should be forced to run even if it has persisted output. False otherwise.
version (str): The name of the published version to load from
runConfig (dict): runtime configuration to use when running the module
quickRun (bool): skip computing dqm+metadata and persisting csv
Returns:
(DataFrame): The result of running the named module.
"""
return SmvApp.getInstance().runModuleByName(name, forceRun, version, runConfig, quickRun)[0]
def props():
"""The current app propertied used by SMV after the app, user, command-line
and dynamic props are merged.
Returns:
(dict): The 'mergedProps' or final props used by SMV
"""
return SmvApp.getInstance().getCurrentProperties()
def dshash(name, runConfig=None):
"""The current hashOfHash for the named module as a hex string
Args:
        name (str): The unique name of a module. Does not have to be the FQN.
runConfig (dict): runConfig to apply when collecting info. If module
was run with a config, the same config needs to be
specified here to retrieve the correct hash.
Returns:
(int): The hashOfHash of the named module
"""
return SmvApp.getInstance().getDsHash(name, runConfig)
def getModel(name, forceRun = False, version = None):
"""Get the result of running the named SmvModel module
Args:
name (str): The name of a module. Does not have to be the FQN.
forceRun (bool): True if the module should be forced to run even if it has persisted output. False otherwise.
version (str): The name of the published version to load from
Returns:
(object): The result of running the named module
"""
app = SmvApp.getInstance()
urn = app.inferUrn(name)
return app.getModuleResult(urn, forceRun, version)
def openHive(tableName):
"""Read in a Hive table as a DataFrame
Args:
tableName (str): The name of the Hive table
Returns:
(DataFrame): The resulting DataFrame
"""
return DataFrame(_jvmShellCmd().openHive(tableName), SmvApp.getInstance().sqlContext)
def openCsv(path, validate=False):
"""Read in a CSV file as a DataFrame
Args:
path (str): The path of the CSV file
        validate (bool): If true, validate the CSV before returning the DataFrame (raise an error if it is malformed)
Returns:
(DataFrame): The resulting DataFrame
"""
app = SmvApp.getInstance()
jdf = app.j_smvPyClient.shellOpenCsv(path, validate)
return DataFrame(jdf, SmvApp.getInstance().sqlContext)
def smvExportCsv(name, path):
"""Export the result of a module to a CSV file at a local path
Args:
        name (str): the name of the module
path (str): a path on the local file system
"""
_jvmShellCmd().smvExportCsv(name, path, None)
def help():
"""Print a list of the SMV helper functions available in the shell
"""
this_mod = sys.modules[__name__]
help_msg = "SMV shell commands:"
for func_name in __all__:
func = getattr(this_mod, func_name)
signature = formatargspec(*getargspec(func))
help_msg += "\n* {}{}".format(func_name, signature)
smv_version = SmvApp.getInstance().j_smvApp.smvVersion()
doc_url = ("http://tresamigossd.github.io/SMV/pythondocs/{}/smv.html#module-smv.smvshell"
.format(smv_version))
help_msg += "\nDocumentation may be found at " + doc_url
print(help_msg)
def lsStage():
"""List all the stages
"""
print(_jvmShellCmd().lsStage())
def ls(stageName = None):
"""List all datasets in a stage
Args:
        stageName (str): The name of the stage. Defaults to None, in which case all datasets in all stages will be listed.
"""
if(stageName is None):
print(_jvmShellCmd().ls())
else:
print(_jvmShellCmd().ls(stageName))
def lsDead(stageName = None):
"""List dead datasets in a stage
Args:
        stageName (str): The name of the stage. Defaults to None, in which case all datasets in all stages will be listed.
"""
if(stageName is None):
print(_jvmShellCmd().lsDead())
else:
print(_jvmShellCmd().lsDead(stageName))
def lsDeadLeaf(stageName = None):
"""List 'deadLeaf' datasets in a stage
    A 'deadLeaf' dataset is a dataset for which "no modules in the stage depend
on it, excluding Output modules"
Note: a `deadLeaf` dataset must be `dead`, but some `dead` datasets aren't
`leaves`.
Args:
        stageName (str): The name of the stage. Defaults to None, in which case all datasets in all stages will be listed.
"""
if(stageName is None):
print(_jvmShellCmd().lsDeadLeaf())
else:
print(_jvmShellCmd().lsDeadLeaf(stageName))
def exportToHive(dsname, runConfig=None):
"""Export dataset's running result to a Hive table
Args:
dsname (str): The name of an SmvDataSet
"""
SmvApp.getInstance().publishModuleToHiveByName(dsname, runConfig)
def ancestors(dsname):
"""List all ancestors of a dataset
Ancestors of a dataset are the dataset it depends on, directly or
in-directly, including datasets from other stages.
Args:
dsname (str): The name of an SmvDataSet
"""
print(_jvmShellCmd().ancestors(dsname))
def descendants(dsname):
"""List all descendants of a dataset
Descendants of a dataset are the datasets which depend on it directly or
in-directly, including datasets from other stages
Args:
dsname (str): The name of an SmvDataSet
"""
print(_jvmShellCmd().descendants(dsname))
def graph(stageName = None):
"""Print ascii graph of all datasets in a given stage or all stages
Args:
        stageName (str): Optional name of the stage to graph. Defaults to None, in which case all datasets in all stages are graphed.
"""
if(stageName is None):
print(_jvmShellCmd()._graph())
else:
print(_jvmShellCmd()._graph(stageName))
def graphStage():
"""Print ascii graph of all stages (not datasets)
"""
print(_jvmShellCmd()._graphStage())
def now():
"""Print current time
"""
print(_jvmShellCmd().now())
def smvDiscoverSchemaToFile(path, n=100000, ca=None):
"""Try best to discover Schema from raw Csv file
Will save a schema file with postfix ".toBeReviewed" in local directory.
Args:
path (str): Path to the CSV file
n (int): Number of records to check for schema discovery, default 100k
ca (CsvAttributes): Defaults to CsvWithHeader
"""
SmvApp.getInstance()._jvm.SmvPythonHelper.smvDiscoverSchemaToFile(path, n, ca or SmvApp.getInstance().defaultCsvWithHeader())
def edd(ds_name):
"""Print edd report for the result of an SmvDataSet
Args:
ds_name (str): name of an SmvDataSet
"""
report = _jvmShellCmd()._edd(ds_name)
print(report)
def run_test(test_name):
"""Run a test with the given name without creating new Spark context
First reloads SMV and the test from source, then runs the test.
Args:
test_name (str): Name of the test to run
"""
# Ensure TestConfig has a canonical SmvApp (this will eventually be used
# to restore the singleton SmvApp)
TestConfig.setSmvApp(SmvApp.getInstance())
first_dot = test_name.find(".")
if first_dot == -1:
test_root_name = test_name
else:
test_root_name = test_name[:first_dot]
_clear_from_sys_modules(["smv", test_root_name])
SmvTestRunner("src/test/python").run([test_name])
def _clear_from_sys_modules(names_to_clear):
"""Clear smv and the given names from sys.modules (don't clear this module)
"""
    for name in list(sys.modules.keys()):  # copy the keys so entries can be popped while iterating
for ntc in names_to_clear:
if name != "smv.smvshell" and (name.startswith(ntc + ".") or name == ntc):
sys.modules.pop(name)
break
def show_run_info(collector):
"""Inspects the SmvRunInfoCollector object returned by smvApp.runModule"""
collector.show_report()
def get_run_info(name, runConfig=None):
"""Get the SmvRunInfoCollector with full information about a module and its dependencies
Args:
name (str): name of the module whose information to collect
runConfig (dict): runConfig to apply when collecting info. If module
was run with a config, the same config needs to be
specified here to retrieve the info.
"""
return SmvApp.getInstance().getRunInfoByPartialName(name, runConfig)
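def _example_session():  # pragma: no cover
    """A hedged sketch of a typical shell session; never called automatically.

    The stage and module names below ("stage1", "EmploymentByState") are hypothetical
    placeholders -- substitute names from your own SMV project.
    """
    lsStage()                                        # list all stages
    ls("stage1")                                     # list datasets in one stage
    result = df("EmploymentByState", forceRun=True)  # run a module and get its DataFrame
    result.show()
    edd("EmploymentByState")                         # print its edd report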
__all__ = [
'df',
'dshash',
'getModel',
'openHive',
'openCsv',
'smvExportCsv',
'help',
'lsStage',
'ls',
'lsDead',
'lsDeadLeaf',
'props',
'exportToHive',
'ancestors',
'descendants',
'graph',
'graphStage',
'now',
'smvDiscoverSchemaToFile',
'edd',
'run_test',
'show_run_info',
'get_run_info'
]
|
py | b40dd38245d1e58638b14ca478aa2a098782dad6 | def foo():
for a<caret>
pass |
py | b40dd3adf80aa2c3a8dd393ca94af7366356b7f4 | import numpy as np
def y_grayscale(rgb_image):
"""Return a grayscale version of the RGB image, using the Y channel of the YCbCr color space."""
# For details on the YCbCr color space, see:
# https://en.wikipedia.org/wiki/YCbCr
# https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.601-7-201103-I!!PDF-E.pdf
return (.299 * rgb_image[:, :, 0] +
.587 * rgb_image[:, :, 1] +
.114 * rgb_image[:, :, 2]).round().astype(np.uint8)
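if __name__ == "__main__":
    # Quick sanity check on a tiny synthetic image (not part of the original module):
    # a pure-white RGB pixel should map to 255 and a pure-black one to 0.
    rgb = np.array([[[255, 255, 255], [0, 0, 0]]], dtype=np.uint8)
    gray = y_grayscale(rgb)
    print(gray)        # expected: [[255   0]]
    print(gray.dtype)  # uint8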
|
py | b40dd3f5f476b8994f2843ca17c4a078b1d39351 | """This file contains techniques for extracting data from HTML pages."""
from parsel import Selector
class Technique(object):
def __init__(self, extractor=None, *args, **kwargs):
"""Capture the extractor this technique is running within, if any."""
self.extractor = extractor
super(Technique, self).__init__(*args, **kwargs)
def extract(self, html):
"""Extract data from a string representing an HTML document."""
return {
'titles': [],
'descriptions': [],
'images': [],
'urls': [],
}
class HeadTags(Technique):
"""Extract info from standard HTML metatags like title, for example:
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<meta name="author" content="Will Larson" />
<meta name="description" content="Will Larson's blog about programming and other things." />
<meta name="keywords" content="Blog Will Larson Programming Life" />
<link rel="alternate" type="application/rss+xml" title="Page Feed" href="/feeds/" />
<link rel="canonical" href="http://lethain.com/digg-v4-architecture-process/">
<title>Digg v4's Architecture and Development Processes - Irrational Exuberance</title>
</head>
This is usually a last-resort, low quality, but reliable parsing mechanism.
"""
meta_name_map = {
"description": "descriptions",
"author": "authors",
}
def extract(self, html):
"""Extract data from meta, link and title tags within the head tag."""
extracted = {}
selector = Selector(html)
# extract data from title tag
title_tag = selector.xpath('//title/text()').extract_first()
if title_tag:
extracted['titles'] = [title_tag]
# extract data from meta tags
for meta_tag in selector.xpath('//meta'):
if 'name' in meta_tag.attrib and 'content' in meta_tag.attrib:
name = meta_tag.attrib['name']
if name in self.meta_name_map:
name_dest = self.meta_name_map[name]
if name_dest not in extracted:
extracted[name_dest] = []
extracted[name_dest].append(meta_tag.attrib['content'])
# extract data from link tags
for link_tag in selector.xpath('//link'):
if 'rel' in link_tag.attrib:
if ('canonical' in link_tag.attrib['rel'] or link_tag.attrib['rel'] == 'canonical') and 'href' in link_tag.attrib:
if 'urls' not in extracted:
extracted['urls'] = []
extracted['urls'].append(link_tag.attrib['href'])
elif ('alternate' in link_tag.attrib['rel'] or link_tag.attrib['rel'] == 'alternate') and 'type' in link_tag.attrib and link_tag.attrib['type'] == "application/rss+xml" and 'href' in link_tag.attrib:
if 'feeds' not in extracted:
extracted['feeds'] = []
extracted['feeds'].append(link_tag.attrib['href'])
return extracted
class FacebookOpengraphTags(Technique):
"""Extract info from html Facebook Opengraph meta tags.
Facebook tags are ubiquitous on high quality sites, and tend to be higher quality
than more manual discover techniques. Especially for picking high quality images,
this is probably your best bet.
Some example tags from `the Facebook opengraph docs <https://developers.facebook.com/docs/opengraphprotocol/>`::
<meta property="og:title" content="The Rock"/>
<meta property="og:type" content="movie"/>
<meta property="og:url" content="http://www.imdb.com/title/tt0117500/"/>
<meta property="og:image" content="http://ia.media-imdb.com/rock.jpg"/>
<meta property="og:site_name" content="IMDb"/>
<meta property="fb:admins" content="USER_ID"/>
<meta property="og:description"
content="A group of U.S. Marines, under command of
a renegade general, take over Alcatraz and
threaten San Francisco Bay with biological
weapons."/>
There are a bunch of other opengraph tags, but they don't seem
useful to extraction's intent at this point.
"""
key_attr = 'property'
property_map = {
'og:title': 'titles',
'og:url': 'urls',
'og:image': 'images',
'og:description': 'descriptions',
}
def extract(self, html):
"""Extract data from Facebook Opengraph tags."""
extracted = {}
selector = Selector(html)
for meta_tag in selector.xpath('//meta'):
if self.key_attr in meta_tag.attrib and 'content' in meta_tag.attrib:
property = meta_tag.attrib[self.key_attr]
if property in self.property_map:
property_dest = self.property_map[property]
if property_dest not in extracted:
extracted[property_dest] = []
extracted[property_dest].append(meta_tag.attrib['content'])
return extracted
class TwitterSummaryCardTags(FacebookOpengraphTags):
"""Extract info from the Twitter SummaryCard meta tags."""
key_attr = 'name'
property_map = {
'twitter:title': 'titles',
'twitter:description': 'descriptions',
'twitter:image': 'images',
}
class HTML5SemanticTags(Technique):
"""
The HTML5 `article` tag, and also the `video` tag give us some useful
hints for extracting page information for the sites which happen to
utilize these tags.
This technique will extract information from pages formed like::
<html>
<body>
<h1>This is not a title to HTML5SemanticTags</h1>
<article>
<h1>This is a title</h1>
<p>This is a description.</p>
<p>This is not a description.</p>
</article>
<video>
<source src="this_is_a_video.mp4">
</video>
</body>
</html>
Note that `HTML5SemanticTags` is intentionally much more conservative than
`SemanticTags`, as it provides high quality information in the small number
    of cases where it hits, and otherwise expects `SemanticTags` to run a sweep
behind it for the lower quality, more abundant hits it discovers.
"""
def extract(self, html):
"""Extract data from HTML5 semantic tags."""
titles = []
descriptions = []
videos = []
selector = Selector(html)
for article in selector.xpath('//article') or []:
title = article.xpath('string(.//h1)').extract()
print(title)
if title:
titles.append(' '.join(title))
desc = article.xpath('string(.//p)')
if desc:
descriptions.append(' '.join(desc.extract()))
for video in selector.xpath('.//video') or []:
for source in video.xpath('.//source') or []:
if 'src' in source.attrib:
videos.append(source.attrib['src'])
return {
'titles': titles,
'descriptions': descriptions,
'videos': videos
}
class SemanticTags(Technique):
"""
This technique relies on the basic tags themselves--for example,
all IMG tags include images, most H1 and H2 tags include titles,
and P tags often include text usable as descriptions.
This is a true last resort technique.
"""
# list to support ordering of semantics, e.g. h1
# is higher quality than h2 and so on
# format is ("name of tag", "destination list", store_first_n)
extract_string = [
('string(//h1)', 'titles', 3),
('string(//h2)', 'titles', 3),
('string(//h3)', 'titles', 1),
('string(//p)', 'descriptions', 5),
]
# format is ("name of tag", "destination list", "name of attribute" store_first_n)
extract_attr = [('//img', 'images', 'src', 10)]
def extract(self, html):
"""Extract data from usual semantic tags."""
extracted = {}
soup = Selector(html)
for tag, dest, max_to_store in self.extract_string:
for found in soup.xpath(tag)[:max_to_store] or []:
if dest not in extracted:
extracted[dest] = []
extracted[dest].append(found.extract())
for tag, dest, attribute, max_to_store in self.extract_attr:
for found in soup.xpath(tag)[:max_to_store] or []:
if attribute in found.attrib:
if dest not in extracted:
extracted[dest] = []
extracted[dest].append(found.attrib[attribute])
return extracted
|
py | b40dd4086f5ee37e420f291ddfa8d7439d973c4c | """Test utilities."""
from os.path import basename
import cobra
import numpy as np
import micom
import micom.util as util
from .fixtures import community
URL = "http://bigg.ucsd.edu/static/models/e_coli_core.xml.gz"
tax = micom.data.test_taxonomy()
def test_download(tmpdir):
print(tmpdir.dirpath())
util.download_model(URL, str(tmpdir))
assert tmpdir.join("e_coli_core.xml.gz").check()
model = util.load_model(URL)
assert len(model.reactions) == 95
assert len(model.metabolites) == 72
def test_load_model():
row = tax.loc[0]
model = util.load_model(row.file)
assert len(model.reactions) == 95
assert len(model.metabolites) == 72
def test_serialization(tmpdir):
row = tax.loc[0]
util.serialize_models([row.file], str(tmpdir))
assert tmpdir.join("e_coli_core.pickle").check()
def test_fluxes_from_primals(community):
community.solver.optimize()
fluxes = util.fluxes_from_primals(community, tax.loc[0])
assert len(fluxes) < len(community.reactions)
assert len(fluxes) == 95
def test_join_models():
single = util.load_model(tax.file[0])
single_coefs = {
v.name: coef for v, coef in
single.objective.get_linear_coefficients(single.variables).items()
}
mod = util.join_models(tax.file, id="test_model")
coefs = {
v.name: coef for v, coef in
mod.objective.get_linear_coefficients(mod.variables).items()
}
assert len(mod.reactions) == len(single.reactions) + 1 # added biomass
assert len(mod.metabolites) == len(single.metabolites)
assert np.allclose(single.slim_optimize(), mod.slim_optimize())
def test_compartment_id():
met = cobra.core.Metabolite(id="test_met_e__taxon")
met.community_id = "taxon"
met.global_id = "text_met_e"
met.compartment = "e__taxon"
assert util.compartment_id(met) == "e"
met.compartment = "C_e__taxon"
assert util.compartment_id(met) == "e"
met.global_id = "test_met[e]"
assert util.compartment_id(met) == "e"
met.global_id = "test_met(e)"
assert util.compartment_id(met) == "e"
|
py | b40dd5bea864e9f18d7f89bd7da25ac6d4524fba | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start kcoinds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on kcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create kcoinds that connect to them
- Manipulate the kcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import KcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(KcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
py | b40dd6717debff9b47806386707b0728dcbe361d | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: usage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='usage.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0busage.proto\"$\n\x05Usage\x12\x0c\n\x04time\x18\x01 \x01(\x02\x12\r\n\x05usage\x18\x02 \x01(\x02\x62\x06proto3')
)
_USAGE = _descriptor.Descriptor(
name='Usage',
full_name='Usage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time', full_name='Usage.time', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='usage', full_name='Usage.usage', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=15,
serialized_end=51,
)
DESCRIPTOR.message_types_by_name['Usage'] = _USAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Usage = _reflection.GeneratedProtocolMessageType('Usage', (_message.Message,), dict(
DESCRIPTOR = _USAGE,
__module__ = 'usage_pb2'
# @@protoc_insertion_point(class_scope:Usage)
))
_sym_db.RegisterMessage(Usage)
# @@protoc_insertion_point(module_scope)
|
py | b40dd6760c6b1fca3787f96af2effc91af367c86 |
# -*- coding: utf-8 -*-
'''
File name: code\coin_partitions\sol_78.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #78 :: Coin partitions
#
# For more information see:
# https://projecteuler.net/problem=78
# Problem Statement
'''
Let p(n) represent the number of different ways in which n coins can be separated into piles. For example, five coins can be separated into piles in exactly seven different ways, so p(5)=7.
OOOOO
OOOO O
OOO OO
OOO O O
OO OO O
OO O O O
O O O O O
Find the least value of n for which p(n) is divisible by one million.
'''
# Solution
# Solution Approach
'''
'''
|
py | b40dd6ac60e9006f5c2b0484a27caac03f7c1128 | # ext/asyncio/__init__.py
# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .engine import AsyncConnection # noqa
from .engine import AsyncEngine # noqa
from .engine import AsyncTransaction # noqa
from .engine import create_async_engine # noqa
from .events import AsyncConnectionEvents # noqa
from .events import AsyncSessionEvents # noqa
from .result import AsyncMappingResult # noqa
from .result import AsyncResult # noqa
from .result import AsyncScalarResult # noqa
from .session import AsyncSession # noqa
from .session import AsyncSessionTransaction # noqa
|
py | b40dd84e6be4e19634e4af5c911d0e76caeeb977 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../../')
from rusentrel.classic.ctx.self_att_bilstm import run_testing_self_att_bilstm
from rusentrel.classic_cv.common import CV_COUNT, \
classic_cv_common_callback_modification_func, \
CV_NAME_PREFIX
if __name__ == "__main__":
run_testing_self_att_bilstm(
name_prefix=CV_NAME_PREFIX,
cv_count=CV_COUNT,
custom_callback_func=classic_cv_common_callback_modification_func)
|
py | b40dd86948b2de3b1337ce2d931dc47a27dfe317 | from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from db.db_connection import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String, nullable=True)
surname = Column(String, nullable=True)
email = Column(String)
login = Column(String)
password = Column(String)
user_settings = relationship("UserSettings", back_populates="user")
site_settings = relationship("SiteSettings", back_populates="user")
def __repr__(self):
return f"<User(id:{self.id}, login:{self.login}, email:{self.email}, password:{self.password})>"
|
py | b40dd902fa13da85748db53a298104f14409be6c | #!/usr/bin/env python
"""
This module provides PrimaryDataset.GetID data access object.
"""
from dbs.dao.Oracle.PrimaryDataset.GetID import GetID as OraPrimaryDatasetGetID
class GetID(OraPrimaryDatasetGetID):
pass
|
py | b40dd959caacec9610b879d52b3cd68b9140a45c | """
MIT License
Copyright (c) 2022 Grapphy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from typing import List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from .types.wiki import WikiPage as WikiPagePayload
from .connection import Connector
class WikiPage:
__slots__ = (
"_connector",
"layout",
"locale",
"markdown",
"path",
"tags",
"title",
"available_locales",
"subtitle",
)
if TYPE_CHECKING:
_connector: Connector
available_locales: List[str]
layout: str
locale: str
markdown: str
path: str
subtitle: Optional[str]
tags: List[str]
title: str
def __init__(self, *, connector: Connector, data: WikiPagePayload) -> None:
self._connector = connector
self._update_data(data)
def __repr__(self) -> str:
return (
f"<WikiPage title={self.title} tags={self.tags}"
f" path={self.path} locale={self.locale}"
)
def __str__(self) -> str:
return f"{self.markdown}"
@property
def url(self) -> str:
return f"https://osu.ppy.sh/wiki/{self.locale}/{self.path}"
def _update_data(self, data: WikiPagePayload) -> None:
self.layout = data["layout"]
self.locale = data["locale"]
self.markdown = data["markdown"]
self.path = data["path"]
self.tags = data["tags"]
self.title = data["title"]
self.available_locales = data["available_locales"]
self.subtitle = data.get("subtitle")
|
py | b40dd9ae9840fae1c02eb586de44c0573ce2ecb4 | # Copyright (C) 2021 Satoshi Konno. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, List, Tuple, Optional, Dict
from .object import Object
class Node(object):
PORT = 3610
__address: Optional[Tuple[str, int]]
__objects: Dict[int, Object]
def __init__(self, addr: Optional[Tuple[str, int]] = None):
self.__address = ()
self.__objects = {}
self.set_address(addr)
def set_address(self, addr: Tuple[str, int]) -> bool:
if not isinstance(addr, tuple) or len(addr) != 2:
return False
self.__address = addr
return True
@property
def address(self) -> Optional[Tuple[str, int]]:
return self.__address
@property
def ip(self) -> str:
if len(self.__address) != 2:
return ""
return self.__address[0]
@property
def port(self) -> int:
if len(self.__address) != 2:
return 0
return Node.PORT
def add_object(self, obj: Object) -> bool:
if not isinstance(obj, Object):
return False
self.__objects[obj.code] = obj
obj.node = self
return True
@property
def objects(self) -> List[Object]:
objs = []
for obj in self.__objects.values():
objs.append(obj)
return objs
def has_object(self, code: int) -> bool:
return code in self.__objects.keys()
def get_object(self, code: Union[int, Tuple[int, int, int]]) -> Optional[Object]:
""" Returns the object specified the code.
Args:
code (Union[str, Tuple[int, int, int]]): A object code.
Returns:
Optional[Object]: Returns the object when the node has the object specified by the code, otherwise None.
"""
obj = Object(code)
try:
return self.__objects[obj.code]
except KeyError:
return None
|
py | b40dd9de34ba5f3c3625985ecbf3216f1c7a5e99 | # -*- coding: utf-8 -*-
"""
Base implementation of a torrent provider. New providers should mostly be ok
just overriding the fetch_releases method.
"""
import logging
from abc import ABCMeta, abstractmethod
from time import time
from tranny.app import config, Session
from tranny.models import Download
class TorrentProvider(object, metaclass=ABCMeta):
"""
Base torrent provider used to download new releases from trackers
"""
def __init__(self, config_section):
""" Provides a basic interface to generate new torrents from external services.
:param config_section:
:type config_section:
"""
# Timestamp of last successful update
self.enabled = False
self.last_update = 0
self._config_section = config_section
self.interval = config.get_default(config_section, "interval", 60, int)
self.log = logging.getLogger(config_section)
self.log.debug("Initialized {} Provider ({} State): {}".format(
self.__class__.__name__,
'Enabled' if self.enabled else 'Disabled', self.name)
)
@property
def name(self):
return self._config_section.split("_")[1]
def find_matches(self):
"""
:return:
:rtype:
"""
t0 = time()
delta = t0 - self.last_update
if not delta > self.interval or not self.enabled:
            return  # PEP 479: raising StopIteration inside a generator is a RuntimeError in Python 3.7+
self.last_update = t0
session = Session()
for torrent, release_info in self.fetch_releases(session):
yield session, [torrent, release_info]
@abstractmethod
def fetch_releases(self, session):
"""
:param session:
:type session: sqlalchemy.orm.session.Session
"""
raise NotImplementedError("Must override this method")
def exists(self, session, release_key):
try:
return session.query(Download).filter_by(release_key="{}".format(release_key)).all()
except Exception as err:
self.log.exception(err)
return False
def is_replacement(self, release_info):
"""
:param release_info:
"""
fetch_proper = config.get_default("general", "fetch_proper", True, bool)
# Skip releases unless they are considered propers or repacks
        # Assuming the release info also exposes is_proper; the original checked is_repack twice.
        if fetch_proper and not (release_info.is_proper or release_info.is_repack):
self.log.debug("Skipped previously downloaded release ({0}): {1}".format(
release_info.release_key, release_info.release_name))
return False
return True
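# Illustration only (not part of the original module): a minimal sketch of a concrete
# provider. The payloads yielded by a real implementation would come from a tracker's
# API or RSS feed; the empty list below is just a stand-in.
class _ExampleProvider(TorrentProvider):
    def fetch_releases(self, session):
        # Yield (torrent, release_info) pairs; find_matches() wraps each pair with the
        # session and enforces the interval/enabled checks for us.
        for torrent, release_info in []:  # replace with real tracker results
            if not self.exists(session, release_info.release_key):
                yield torrent, release_info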
|
py | b40ddaf664fe624c248b7ff1957b6cec970a57c7 | """Attachments interface
Direct access to the attachments endpoint
The user is not expected to use this class directly. It is an attribute of the
:class:`Archivist` class.
For example instantiate an Archivist instance and execute the methods of the class:
.. code-block:: python
with open(".auth_token", mode="r", encoding="utf-8") as tokenfile:
authtoken = tokenfile.read().strip()
# Initialize connection to Archivist
arch = Archivist(
"https://app.rkvst.io",
authtoken,
)
with open("something.jpg") as fd:
attachment = arch.attachments.upload(fd)
"""
# pylint:disable=too-few-public-methods
from typing import BinaryIO
import logging
from requests.models import Response
# pylint:disable=unused-import # To prevent cyclical import errors forward referencing is used
# pylint:disable=cyclic-import # but pylint doesn't understand this feature
from . import archivist as type_helper
from .constants import ATTACHMENTS_SUBPATH, ATTACHMENTS_LABEL
LOGGER = logging.getLogger(__name__)
class Attachment(dict):
"""Attachment
Attachment object has dictionary attributes.
"""
class _AttachmentsClient:
"""AttachmentsClient
Access to attachments entities using CRUD interface. This class is usually
accessed as an attribute of the Archivist class.
Args:
archivist (Archivist): :class:`Archivist` instance
"""
def __init__(self, archivist: "type_helper.Archivist"):
self._archivist = archivist
def upload(self, fd: BinaryIO, *, mtype: str = "image/jpg") -> Attachment:
"""Create attachment
Creates attachment from opened file or other data source.
Args:
fd (file): opened file descriptor or other file-type iterable.
mtype (str): mimetype of data.
Returns:
:class:`Attachment` instance
"""
LOGGER.debug("Upload Attachment")
return Attachment(
**self._archivist.post_file(
f"{ATTACHMENTS_SUBPATH}/{ATTACHMENTS_LABEL}",
fd,
mtype,
)
)
def download(self, identity: str, fd: BinaryIO) -> Response:
"""Read attachment
        Reads attachment into a data sink (usually a file opened for writing).
Note that returns the response as the body will be consumed by the
fd iterator
Args:
identity (str): attachment identity e.g. attachments/xxxxxxxxxxxxxxxxxxxxxxx
            fd (file): opened file descriptor or other file-type sink.
Returns:
REST response
"""
return self._archivist.get_file(
ATTACHMENTS_SUBPATH,
identity,
fd,
)
|
py | b40ddb782df220f75228a298431bb092eebc728b | from __future__ import annotations
from . import actions
from . import card
from . import dealer
from . import deck
from . import engine
from . import player
from . import pot
from . import random_player
from . import state
from . import table
|
py | b40ddcdeeb3ddcec6741ab884b6b52c4d2e98f47 | #!/usr/bin/env python3
import sys
import apx
import argparse
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('input_file', nargs='*', help='Path to input file (.apx)')
arg_parser.add_argument('-o', dest='output_file', help='Output file name (default is to overwrite input file)', default=None)
arg_parser.add_argument('-p', '--pipe', help='Uses stdin and stdout instead of files', action='store_true', default=False)
args = arg_parser.parse_args()
if len(args.input_file) == 0 and not args.pipe:
arg_parser.print_help()
sys.exit(1)
output_file = args.output_file if args.output_file is not None else args.input_file[0]
if args.pipe:
node = apx.Parser().load(sys.stdin)
print(node.dumps(normalized=True))
else:
node = apx.Parser().parse(args.input_file[0])
node.save_apx(output_file = output_file, normalized=True) |
py | b40ddce7950ad05a5247374c011da1e0582f1723 | __author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
DURATION.COUNTER = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
rv.LOCATIONS = [1, 2, 3, 4, 5]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4, 5], 4: [3], 5: [3]}
rv.DOORS = ['d1', 'd2']
rv.DOORLOCATIONS = {(2, 3): 'd1', (3, 5): 'd2'}
rv.DOORTYPES = {'d1': 'spring', 'd2': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3']
def ResetState():
state.load = {'r1': NIL, 'r2': NIL, 'r3': NIL}
state.status = {'r1': 'free', 'r2': 'free', 'r3': 'free'}
state.loc = {'r1': 1, 'r2': 1, 'r3': 1}
state.pos = {'o1': 4}
state.doorStatus = {'d1': 'closed', 'd2': 'closed', }
state.doorType = {'d1': UNK, 'd2': UNK, }
tasks = {
7: [['fetch', 'r1', 'o1', 2]],
}
eventsEnv = {
} |
py | b40ddd39e9e34e7ce4f9d5568b66c06e6d0cc547 | import copy
from cantoolz.module import CANModule
class ecu_switch(CANModule):
name = "CAN Switch"
help = """
    This module emulates a CAN switch.
Init params (example):
{
'Cabin': { # From Cabin interface
'OBD2':[ # To OBD2 allowed next ID
0x81, # Left door status
0x82 # Right door status
],
},
'Engine': {
'OBD2': [
0x79,
0x709
],
'Cabin':[
0x79
]
},
'OBD2': {
'Engine':[
0x701
],
}
}
"""
_active = True
def do_init(self, params):
self._rules = params
# Effect (could be fuzz operation, sniff, filter or whatever)
def do_effect(self, can_msg, args):
current_rule = self._rules.get(args['pipe'], {})
if can_msg.CANData and args['action'] == "read": # READ
for route_to, allowed_id in current_rule.items():
if can_msg.CANFrame.frame_id in allowed_id:
buffer = self._rules[route_to].get('buffer', [])
buffer.append(copy.deepcopy(can_msg.CANFrame))
self._rules[route_to].update({'buffer': buffer})
elif args['action'] == "write" and not can_msg.CANData: # Write
buffer_len = len(current_rule.get('buffer', []))
if buffer_len > 0:
can_msg.CANFrame = self._rules[args['pipe']]['buffer'].pop(0)
can_msg.CANData = True
can_msg.bus = self._bus
return can_msg
|
py | b40dde7737f40e23af3a4a20c9f550a0ac17257b | from sklearn.model_selection import ParameterSampler, ParameterGrid
from scipy.stats.distributions import uniform
from subprocess import run
param_grid = {
"mutation_rate": uniform(),
"crossover_rate": uniform(),
"population_size": uniform(5, 100),
}
grid = ParameterSampler(param_grid, 5)
file_grid = {
"target": ["banth1", "ecol", "ngon", "paer"],
"insert": ["BoNT", "gfp", "human_HBB", "insulin", "luciferase", "oxytocin"],
}
file_grid = ParameterGrid(file_grid)
for params in grid:
print(params)
cmd = ["freqgen generate"]
cmd.append(f"-p {int(params['population_size'])}")
cmd.append(f"-m {params['mutation_rate']}")
cmd.append(f"-e 15")
cmd.append(f"-c {params['crossover_rate']}")
# cmd.append(f"--pop-count {int(params['population_count'])}")
cmd.append(f"--log --dna ")
for f in file_grid:
cmd_for_file = (
" ".join(cmd)
+ f"-s inserts/{f['insert']}.fasta "
+ f"-f targets/{f['target']}.yaml"
+ " > /dev/null"
)
print(cmd_for_file)
run(cmd_for_file, shell=True)
|
py | b40ddeacb4b762b72894a6c52ce61d910fe90f80 | import sys
import pandas as pd
from ftfy import fix_encoding
from math import sqrt
import unidecode
def fixerino(row, field):
    # NaN != NaN, so this comparison skips missing (NaN) cells
    if row[field] == row[field]:
a = unidecode.unidecode(fix_encoding(row[field]))
return "'{}'".format(a.strip().replace("'", ''))
else:
return 'undefined'
def parse_list(s):
if ',' in str(s):
return ','.join(filter(None, str(s).split(',')))
else:
return s
#paragem(gid,latitude,longitude,Estado de Conservacao,Tipo de Abrigo,Abrigo com Publicidade?,Operadora,[Carreira],Codigo de Rua,Nome da Rua,Freguesia').
def parse_paragens():
xl = pd.read_excel('paragem_autocarros_oeiras_processado_4.xlsx')
for index, row in xl.iterrows():
print("paragem({0},{1},{2},{3},{4},{5},{6},[{7}],{8},{9},{10}).".format(
row['gid'],
row['latitude'],
row['longitude'],
fixerino(row,'Estado de Conservacao'),
fixerino(row, 'Tipo de Abrigo'),
fixerino(row, 'Abrigo com Publicidade?'),
fixerino(row,'Operadora'),
parse_list(row['Carreira']),
row['Codigo de Rua'],
fixerino(row, 'Nome da Rua'),
fixerino(row, 'Freguesia')
))
def distancia(row, n_row):
x1 = row['latitude']
y1 = row['longitude']
x2 = n_row['latitude']
y2 = n_row['longitude']
d = sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))
if d == d:
return d
else:
return 'undefined'
# edge(gid_row, gid_next_row, carreira, distancia).
def parse_graph():
xl = pd.ExcelFile('lista_adjacencias_paragens.xlsx')
for sheet in xl.sheet_names:
book = xl.parse(sheet)
for i in range(book.shape[0]-1):
org = book.iloc[i]['gid']
dest = book.iloc[i + 1]['gid']
if org != dest:
print("edge({0},{1},{2},{3}).".format(
org,
dest,
book.iloc[i]['Carreira'],
distancia(book.iloc[i], book.iloc[i+1])
))
def main():
if len(sys.argv) == 2 and '--stops' in sys.argv[1].lower():
parse_paragens()
elif len(sys.argv) == 2 and '--edges' in sys.argv[1].lower():
parse_graph()
else:
print('USAGE: parse [--stops|--edges]')
main()
|
py | b40ddf042336452ac87d854630ea27a83d7e3fb9 | from flask import render_template
from . import auth
from flask import render_template,redirect,url_for,flash,request
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
from ..models import User
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user)
return redirect(request.args.get('next') or url_for('main.loggedin'))
flash('Invalid username or Password')
title = "SignIn | theBlog"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
#mail_message("Welcome to The blog","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "SignUp | theBlog"
return render_template('auth/signup.html',registration_form = form,title = title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index")) |
py | b40ddf6b8ef6a2699326f136bd1720ae4445c075 | from . import prop
from . import imaging
from . import wotf
from . import ledarray
from . import noise
from . import ransac
from . import simulation
from . import transformation
from . import error
from . import camera
from . import containers
|
py | b40ddfc9ac0675d9892a6214d763319cb8c63694 | # Django settings for DjangoAnalysisTestApp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'DjangoAnalysisTestApp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'DjangoAnalysisTestApp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'myapp',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
py | b40de072ba376bb9ac604cef0fc732efdfe4de81 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMethods in the Ruby Koans
#
from runner.koan import *
def my_global_function(a,b):
return a + b
class AboutMethods(Koan):
pass
# def test_calling_a_global_function(self):
# self.assertEqual(__, my_global_function(2,3))
# # NOTE: Wrong number of arguments is not a SYNTAX error, but a
# # runtime error.
# def test_calling_functions_with_wrong_number_of_arguments(self):
# try:
# my_global_function()
# except TypeError as exception:
# msg = exception.args[0]
# # Note, the text comparison works for Python 3.2
# # It has changed in the past and may change in the future
# self.assertRegex(msg,
# r'my_global_function\(\) missing 2 required positional arguments')
# try:
# my_global_function(1, 2, 3)
# except Exception as e:
# msg = e.args[0]
# # Note, watch out for parenthesis. They need slashes in front!
# self.assertRegex(msg, __)
# # ------------------------------------------------------------------
# def pointless_method(self, a, b):
# sum = a + b
# def test_which_does_not_return_anything(self):
# self.assertEqual(__, self.pointless_method(1, 2))
# # Notice that methods accessed from class scope do not require
# # you to pass the first "self" argument?
# # ------------------------------------------------------------------
# def method_with_defaults(self, a, b='default_value'):
# return [a, b]
# def test_calling_with_default_values(self):
# self.assertEqual(__, self.method_with_defaults(1))
# self.assertEqual(__, self.method_with_defaults(1, 2))
# # ------------------------------------------------------------------
# def method_with_var_args(self, *args):
# return args
# def test_calling_with_variable_arguments(self):
# self.assertEqual(__, self.method_with_var_args())
# self.assertEqual(('one',), self.method_with_var_args('one'))
# self.assertEqual(__, self.method_with_var_args('one', 'two'))
# # ------------------------------------------------------------------
# def function_with_the_same_name(self, a, b):
# return a + b
# def test_functions_without_self_arg_are_global_functions(self):
# def function_with_the_same_name(a, b):
# return a * b
# self.assertEqual(__, function_with_the_same_name(3,4))
# def test_calling_methods_in_same_class_with_explicit_receiver(self):
# def function_with_the_same_name(a, b):
# return a * b
# self.assertEqual(__, self.function_with_the_same_name(3,4))
# # ------------------------------------------------------------------
# def another_method_with_the_same_name(self):
# return 10
# link_to_overlapped_method = another_method_with_the_same_name
# def another_method_with_the_same_name(self):
# return 42
# def test_that_old_methods_are_hidden_by_redefinitions(self):
# self.assertEqual(__, self.another_method_with_the_same_name())
# def test_that_overlapped_method_is_still_there(self):
# self.assertEqual(__, self.link_to_overlapped_method())
# # ------------------------------------------------------------------
# def empty_method(self):
# pass
# def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
# self.assertEqual(__, self.empty_method())
# def test_pass_does_nothing_at_all(self):
# "You"
# "shall"
# "not"
# pass
# self.assertEqual(____, "Still got to this line" != None)
# # ------------------------------------------------------------------
# def one_line_method(self): return 'Madagascar'
# def test_no_indentation_required_for_one_line_statement_bodies(self):
# self.assertEqual(__, self.one_line_method())
# # ------------------------------------------------------------------
# def method_with_documentation(self):
# "A string placed at the beginning of a function is used for documentation"
# return "ok"
# def test_the_documentation_can_be_viewed_with_the_doc_method(self):
# self.assertRegex(self.method_with_documentation.__doc__, __)
# # ------------------------------------------------------------------
# class Dog:
# def name(self):
# return "Fido"
# def _tail(self):
# # Prefixing a method with an underscore implies private scope
# return "wagging"
# def __password(self):
# return 'password' # Genius!
# def test_calling_methods_in_other_objects(self):
# rover = self.Dog()
# self.assertEqual(__, rover.name())
# def test_private_access_is_implied_but_not_enforced(self):
# rover = self.Dog()
# # This is a little rude, but legal
# self.assertEqual(__, rover._tail())
# def test_attributes_with_double_underscore_prefixes_are_subject_to_name_mangling(self):
# rover = self.Dog()
# with self.assertRaises(___): password = rover.__password()
# # But this still is!
# self.assertEqual(__, rover._Dog__password())
# # Name mangling exists to avoid name clash issues when subclassing.
# # It is not for providing effective access protection
|
py | b40de105004b3f42d3810c7f25afc4e6d3d7cc98 | """
WSGI config for WebAppLAN_MonitorDjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'main_app.settings')
application = get_wsgi_application()
|
py | b40de1ad17a312665d971555f5b98069af2838b9 | import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist_data/",one_hot=True)
training_digits, training_labels = mnist.train.next_batch(5000)
test_digits,test_labels = mnist.test.next_batch(200)
training_digits_pl = tf.placeholder("float",[None,784])
test_digit_pl = tf.placeholder("float",[784])
# nearest neighbour calculation using the L1 distance method
l1_distance = tf.abs(tf.add(training_digits_pl, tf.negative(test_digit_pl)))
distance = tf.reduce_sum(l1_distance, axis=1)
# prediction: take the index of the minimum distance (the nearest neighbour)
pred = tf.argmin(distance, 0)
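# This is 1-nearest-neighbour classification: for each test image, the training
# image with the smallest summed absolute pixel difference (L1 distance) is
# selected, and its label is taken as the prediction.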
accuracy = 0.
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(len(test_digits)):
nn_index = sess.run(pred,\
feed_dict={training_digits_pl:training_digits,test_digit_pl:test_digits[i,:]})
print("Test",i,"Prediction:",np.argmax(training_labels[nn_index]),\
"true label:",np.argmax(test_labels[i]))
if np.argmax(training_labels[nn_index]) == np.argmax(test_labels[i]):
accuracy += 1./len(test_digits)
print("Done")
print("Accuracy: ",accuracy) |
py | b40de31926589d2df14a8c681ec64859756b24e8 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import BLOCK_HEADER_SIZE
class ReqType(Enum):
JSON = 1
BIN = 2
HEX = 3
class RetType(Enum):
OBJ = 1
BYTES = 2
JSON = 3
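# ReqType selects the URI suffix appended by test_rest_request (.json/.bin/.hex),
# while RetType selects how the response is handed back: the raw response object,
# its body bytes, or the parsed JSON.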
def filter_output_indices_by_value(vouts, value):
for vout in vouts:
if vout['value'] == value:
yield vout['n']
class RESTTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-rest"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Focalcoin to node 1")
# Random address so node1's balance doesn't increase
not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("i", output.read(4))
response_hash = binascii.hexlify(output.read(32)[::-1]).decode('ascii')
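# The binary /getutxos response starts with a 4-byte chain height followed by the
# 32-byte chain tip hash; the bytes are reversed above to get the usual hex form.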
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, 102) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exist
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = binascii.hexlify(resp_bytes[::-1]).decode('utf-8')
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
|
py | b40de35d8677e61a48d4dafc3753b1480bddf300 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Old tests for the incoming email handler."""
from constants import constants
from core.domain import feedback_services
from core.platform import models
from core.tests import test_utils
import feconf
(feedback_models, email_models) = models.Registry.import_models([
models.NAMES.feedback, models.NAMES.email])
class IncomingReplyEmailTests(test_utils.GenericTestBase):
USER_A_EMAIL = '[email protected]'
USER_B_EMAIL = '[email protected]'
def setUp(self):
super(IncomingReplyEmailTests, self).setUp()
self.signup(self.USER_A_EMAIL, 'A')
self.user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, 'B')
self.user_id_b = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_that_reply_emails_are_added_to_thread(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
# Create thread.
with self.swap(
constants, 'ENABLE_GENERALIZED_FEEDBACK_THREADS', False):
feedback_services.create_thread(
feconf.ENTITY_TYPE_EXPLORATION, self.exploration.id,
'a_state_name', self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
feconf.ENTITY_TYPE_EXPLORATION, self.exploration.id, False)
thread_id = threadlist[0].id
# Create another message.
feedback_services.create_message(
thread_id, self.user_id_b, None, None, 'user b message')
# Check that there are 2 messages in thread.
messages = feedback_services.get_messages(thread_id)
self.assertEqual(len(messages), 2)
# Check that received_via_email is set to False.
self.assertFalse(messages[0].received_via_email)
# Get reply_to id for user A.
model = email_models.FeedbackEmailReplyToIdModel.get(
self.user_id_a, thread_id)
recipient_email = 'reply+%s@%s' % (
model.reply_to_id, feconf.INCOMING_EMAILS_DOMAIN_NAME)
# Send email to Oppia.
self.post_email(
str(recipient_email), self.USER_A_EMAIL,
'feedback email reply', 'New reply')
# Check that new message is added.
messages = feedback_services.get_messages(thread_id)
self.assertEqual(len(messages), 3)
# Check content of message is correct.
msg = messages[-1]
self.assertEqual(msg.text, 'New reply')
self.assertEqual(msg.author_id, self.user_id_a)
self.assertTrue(msg.received_via_email)
|
py | b40de458052ebec7ff6880cd81a8d9d1bac13ffa | # -*- coding: utf-8 -*-
"""Provides classes to interface solvers with PyMzn.
PyMzn interfaces with solvers through the ``Solver`` base class. This class
includes the necessary infomation for PyMzn to setup the solver, together with
the ``solve`` and ``solve_start`` methods, which respectively take care of the
running or asynchronously starting a process that solves the MiniZinc/FlatZinc
model. PyMzn provides a number of solver implementations out-of-the-box.
PyMzn's default solver is ``pymzn.gecode``, an instance of `pymzn.Gecode`.
To use a solver that is not provided by PyMzn or to exend an existing one, one
has to subclass the `Solver` class and implement the ``args`` method, which
returns a list of command line arguments for executing the process. This is
generally enough for most solvers, but you can also directly reimplement the
``solve`` and ``solve_start`` methods for extra flexibility.
For instance::
from pymzn import Solver
from pymzn.process import Process
class MySolver(Solver):
def __init__(self, path='path/to/solver', globals_dir='path/to/gobals'):
super().__init__(globals_dir)
self.cmd = path
def args(self, fzn_file, *args, arg1=val1, arg2=val2, **kwargs):
return [self.cmd, '-arg1', arg1, '-arg2', arg2, fzn_file]
Then it is possible to run the ``minizinc`` function with the custom solver::
my_solver = MySolver()
pymzn.minizinc('test.mzn', solver=my_solver, arg1=val1, arg2=val2)
"""
import re
import logging
import pymzn.config as config
from ..process import Process
from subprocess import CalledProcessError
class Solver:
"""Abstract solver class.
All the solvers inherit from this base class.
Parameters
----------
globals_dir : str
The path to the directory for global included files.
"""
def __init__(
self, globals_dir='std', support_mzn=False, support_all=False,
support_num=False, support_timeout=False, support_output_mode=False,
support_stats=False
):
self.globals_dir = globals_dir
self.support_mzn = support_mzn
self.support_all = support_all
self.support_num = support_num
self.support_timeout = support_timeout
self.support_output_mode = support_output_mode
self.support_stats = support_stats
def args(self, *args, **kwargs):
"""Returns the command line arguments to start the solver"""
raise NotImplementedError()
def solve_start(self, *args, timeout=None, all_solutions=False, **kwargs):
"""Like `solve`, but returns a started Process"""
log = logging.getLogger(__name__)
if timeout and not self.support_timeout:
if not self.support_all:
raise ValueError('Timeout not supported')
all_solutions = True
solver_args = self.args(
*args, timeout=timeout, all_solutions=all_solutions, **kwargs
)
timeout = None if self.support_timeout else timeout
try:
log.debug('Starting solver with arguments {}'.format(solver_args))
return Process(solver_args).start(timeout=timeout)
except CalledProcessError as err:
log.exception(err.stderr)
raise RuntimeError(err.stderr) from err
def solve(self, *args, timeout=None, all_solutions=False, **kwargs):
"""Solve a problem encoded with MiniZinc/FlatZinc.
This method should call an external solver, wait for the solution and
provide the output of the solver. If the solver does not have a Python
interface, the ``pymzn.process`` module can be used to run external
executables.
If a solver does not support dzn output, then its PyMzn implementation
should take care of parsing the solver output and return a dzn
equivalent.
"""
log = logging.getLogger(__name__)
if timeout and not self.support_timeout:
if not self.support_all:
raise ValueError('Timeout not supported')
all_solutions = True
solver_args = self.args(
*args, timeout=timeout, all_solutions=all_solutions, **kwargs
)
timeout = None if self.support_timeout else timeout
try:
log.debug('Running solver with arguments {}'.format(solver_args))
process = Process(solver_args).run(timeout=timeout)
out = process.stdout_data
err = process.stderr_data
except CalledProcessError as err:
log.exception(err.stderr)
raise RuntimeError(err.stderr) from err
return out, err
class Gecode(Solver):
"""Interface to the Gecode solver.
Parameters
----------
mzn_path : str
The path to the mzn-gecode executable.
fzn_path : str
The path to the fzn-gecode executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(
self, mzn_path='mzn-gecode', fzn_path='fzn-gecode', globals_dir='gecode'
):
super().__init__(
globals_dir, support_mzn=True, support_all=True, support_num=True,
support_timeout=True, support_stats=True
)
self.mzn_cmd = mzn_path
self.fzn_cmd = fzn_path
def args(
self, mzn_file, *dzn_files, data=None, include=None, timeout=None,
all_solutions=False, num_solutions=None, output_mode='item', parallel=1,
seed=0, statistics=False, **kwargs
):
mzn = False
args = []
if mzn_file.endswith('fzn'):
args.append(self.fzn_cmd)
else:
mzn = True
args += [self.mzn_cmd, '-G', self.globals_dir]
if include:
if isinstance(include, str):
include = [include]
for path in include:
args += ['-I', path]
if data:
args += ['-D', data]
fzn_flags = []
if statistics:
args.append('-s')
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', str(num_solutions)]
if parallel != 1:
fzn_flags += ['-p', str(parallel)]
if timeout and timeout > 0:
timeout = timeout * 1000 # Gecode takes milliseconds
fzn_flags += ['-time', str(timeout)]
if seed != 0:
fzn_flags += ['-r', str(seed)]
if mzn and fzn_flags:
args += ['--fzn-flags', '{}'.format(' '.join(fzn_flags))]
else:
args += fzn_flags
args.append(mzn_file)
if mzn and dzn_files:
for dzn_file in dzn_files:
args.append(dzn_file)
return args
def solve(self, *args, suppress_segfault=False, **kwargs):
"""Solve a MiniZinc/FlatZinc problem with Gecode.
Parameters
----------
suppress_segfault : bool
Whether to accept or not a solution returned when a segmentation
fault has happened (this is unfortunately necessary sometimes due to
some bugs in Gecode).
"""
log = logging.getLogger(__name__)
solver_args = self.args(*args, **kwargs)
try:
log.debug('Running solver with arguments {}'.format(solver_args))
process = Process(solver_args).run()
out = process.stdout_data
err = process.stderr_data
except CalledProcessError as err:
if suppress_segfault and len(err.stdout) > 0 \
and err.stderr.startswith('Segmentation fault'):
log.warning('Gecode returned error code {} (segmentation '
'fault) but a solution was found and returned '
'(suppress_segfault=True).'.format(err.returncode))
out = err.stdout
else:
log.exception(err.stderr)
raise RuntimeError(err.stderr) from err
return out, err
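# Usage sketch: the module-level default instances defined at the bottom of this
# file are normally passed to pymzn.minizinc (see the module docstring), but a
# solver can also be invoked directly on a flattened model, e.g.
#   import pymzn
#   out, err = pymzn.gecode.solve('model.fzn', all_solutions=True)
# where 'model.fzn' is a placeholder path for any FlatZinc file.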
class Chuffed(Solver):
"""Interface to the Chuffed solver.
Parameters
----------
mzn_path : str
The path to the mzn-chuffed executable.
fzn_path : str
The path to the fzn-chuffed executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(
self, mzn_path='mzn-chuffed', fzn_path='fzn-chuffed',
globals_dir='chuffed'
):
super().__init__(
globals_dir, support_mzn=True, support_all=True, support_num=True,
support_timeout=True
)
self.mzn_cmd = mzn_path
self.fzn_cmd = fzn_path
def args(
self, mzn_file, *dzn_files, data=None, include=None, timeout=None,
all_solutions=False, num_solutions=None, output_mode='item', seed=0,
**kwargs
):
mzn = False
args = []
if mzn_file.endswith('fzn'):
args.append(self.fzn_cmd)
else:
mzn = True
args += [self.mzn_cmd, '-G', self.globals_dir]
if include:
if isinstance(include, str):
include = [include]
for path in include:
args += ['-I', path]
if data:
args += ['-D', data]
fzn_flags = []
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', str(num_solutions)]
if timeout and timeout > 0:
fzn_flags += ['--time-out', str(timeout)]
if seed != 0:
fzn_flags += ['--rnd-seed', str(seed)]
if mzn and fzn_flags:
args += ['--fzn-flags', '"{}"'.format(' '.join(fzn_flags))]
else:
args += fzn_flags
args.append(mzn_file)
if mzn and dzn_files:
for dzn_file in dzn_files:
args.append(dzn_file)
return args
class Optimathsat(Solver):
"""Interface to the Optimathsat solver.
Parameters
----------
path : str
The path to the optimathsat executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(self, path='optimathsat', globals_dir='std'):
super().__init__(globals_dir, support_stats=True)
self.cmd = path
self._line_comm_p = re.compile(r'%.*\n')
self._rational_p = re.compile(r'(\d+)/(\d+)')
def _parse_out(self, out, statistics=False):
stats = ''.join(self._line_comm_p.findall(out))
out = self._line_comm_p.sub('', out)  # strip '%' comment lines from the output
for m in self._rational_p.finditer(out):
n, d = m.groups()
val = float(n) / float(d)
out = re.sub('{}/{}'.format(n, d), str(val), out)
if statistics:
return '\n'.join([out, stats])
return out
def args(self, fzn_file, *args, **kwargs):
return [self.cmd, '-input=fzn', fzn_file]
def solve(self, fzn_file, *args, statistics=False, **kwargs):
out, err = super().solve(fzn_file, *args, **kwargs)
return self._parse_out(out, statistics), err
def solve_start(self, *args, **kwargs):
raise NotImplementedError()
class Opturion(Solver):
"""Interface to the Opturion CPX solver.
Parameters
----------
path : str
The path to the fzn-cpx executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(self, path='fzn-cpx', globals_dir='opturion-cpx'):
super().__init__(globals_dir, support_all=True, support_stats=True)
self.cmd = path
def args(
self, fzn_file, *args, all_solutions=False, statistics=False, **kwargs
):
args = [self.cmd]
if all_solutions:
args.append('-a')
if statistics:
args.append('-s')
args.append(fzn_file)
return args
class MIPSolver(Solver):
"""Interface to the MIP solver.
Parameters
----------
path : str
The path to the mzn-gurobi executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(self, path='mzn-gurobi', globals_dir='linear'):
super().__init__(
globals_dir, support_mzn=True, support_all=True, support_num=True,
support_timeout=True, support_output_mode=True, support_stats=True
)
self.cmd = path
def args(
self, mzn_file, *dzn_files, data=None, include=None, timeout=None,
all_solutions=False, num_solutions=None, output_mode='item', parallel=1,
statistics=False, **kwargs
):
mzn = False
args = [self.cmd]
if mzn_file.endswith('fzn'):
    if output_mode not in ['dzn', 'json']:
        raise ValueError('Only dzn or json output available with fzn input')
else:
    mzn = True
    args += ['-G', self.globals_dir]
if include:
if isinstance(include, str):
include = [include]
for path in include:
args += ['-I', path]
if data:
args += ['-D', data]
if statistics:
args.append('-s')
if all_solutions:
args += ['-a', '--unique']
if num_solutions is not None:
args += ['-n', str(num_solutions)]
if parallel != 1:
args += ['-p', str(parallel)]
if timeout and timeout > 0:
args += ['--timeout', str(timeout)]
args += ['--output-mode', output_mode, mzn_file]
if mzn and dzn_files:
for dzn_file in dzn_files:
args.append(dzn_file)
return args
class Gurobi(MIPSolver):
"""Interface to the Gurobi solver.
Parameters
----------
path : str
The path to the mzn-cbc executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(self, path='mzn-gurobi', globals_dir='linear'):
super().__init__(path, globals_dir)
class CBC(MIPSolver):
"""Interface to the COIN-OR CBC solver.
Parameters
----------
path : str
The path to the mzn-cbc executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(self, path='mzn-cbc', globals_dir='linear'):
super().__init__(path, globals_dir)
class G12Solver(Solver):
"""Interface to a generic G12 solver.
Parameters
----------
mzn_path : str
The path to the mzn executable.
fzn_path : str
The path to the flatzinc executable.
globals_dir : str
The path to the directory for global included files.
backend : str
The backend code of the specific solver used.
"""
def __init__(
self, mzn_path='mzn-g12fd', fzn_path='flatzinc', globals_dir='g12_fd',
backend=None
):
super().__init__(
globals_dir, support_mzn=True, support_all=True, support_num=True,
support_stats=True
)
self.fzn_cmd = fzn_path
self.mzn_cmd = mzn_path
self.backend = backend
def args(
self, mzn_file, *dzn_files, data=None, include=None, statistics=False,
all_solutions=False, num_solutions=None, **kwargs
):
mzn = False
args = []
if mzn_file.endswith('fzn'):
args.append(self.fzn_cmd)
if self.backend:
args += ['-b', self.backend]
else:
mzn = True
args += [self.mzn_cmd, '-G', self.globals_dir]
if include:
if isinstance(include, str):
include = [include]
for path in include:
args += ['-I', path]
if data:
args += ['-D', data]
if statistics:
args.append('-s')
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', str(num_solutions)]
args.append(mzn_file)
if mzn and dzn_files:
for dzn_file in dzn_files:
args.append(dzn_file)
return args
class G12Fd(G12Solver):
"""Interface to the G12Fd solver.
Parameters
----------
mzn_path : str
The path to the mzn executable.
fzn_path : str
The path to the flatzinc executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(
self, mzn_path='mzn-g12fd', fzn_path='flatzinc', globals_dir='g12_fd'
):
super().__init__(mzn_path, fzn_path, globals_dir)
class G12Lazy(G12Solver):
"""Interface to the G12Lazy solver.
Parameters
----------
mzn_path : str
The path to the mzn executable.
fzn_path : str
The path to the flatzinc executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(
self, mzn_path='mzn-g12lazy', fzn_path='flatzinc',
globals_dir='g12_lazyfd'
):
super().__init__(mzn_path, fzn_path, globals_dir, 'lazy')
class G12MIP(G12Solver):
"""Interface to the G12MIP solver.
Parameters
----------
mzn_path : str
The path to the mzn executable.
fzn_path : str
The path to the flatzinc executable.
globals_dir : str
The path to the directory for global included files.
"""
def __init__(
self, mzn_path='mzn-g12mip', fzn_path='flatzinc', globals_dir='linear'
):
super().__init__(mzn_path, fzn_path, globals_dir, 'mip')
class OscarCBLS(Solver):
"""Interface to the Oscar/CBLS solver.
Parameters
----------
path : str
The path to the fzn-oscar-cbls executable.
globals_dir : str
The path to the directory for global included files. You should either
copy or link the 'mznlib' folder from the oscar-cbls-flatzinc
distribution into the minizinc library directory (with name
'oscar-cbls') or provide the full path here.
"""
def __init__(self, path='fzn-oscar-cbls', globals_dir='oscar-cbls'):
super().__init__(
globals_dir, support_all=True, support_num=True,
support_timeout=True, support_stats=True
)
self.cmd = path
def args(
self, mzn_file, *dzn_files, data=None, include=None, timeout=None,
all_solutions=False, num_solutions=None, statistics=False, **kwargs
):
"""Solve a FlatZinc problem with Oscar/CBLS.
Parameters
----------
mzn_file : str
The path to the fzn file to solve.
Returns
-------
str
The output of the solver in dzn format.
"""
log = logging.getLogger(__name__)
args = [self.cmd]
if statistics:
args += ['-s', '-v']
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', str(num_solutions)]
if timeout:
args += ['-t', str(timeout)]
args.append(mzn_file)
return args
class ORTools(Solver):
"""Interface to the OR-tools solver.
Parameters
----------
path : str
The path to the fzn-or-tools executable.
globals_dir : str
The path to the directory for global included files. You should either
copy or link the 'share/minizinc_cp' folder from the or-tools
distribution into the minizinc library directory (with name 'or-tools')
or provide the full path here.
"""
def __init__(self, fzn_path='fzn-or-tools', globals_dir='or-tools'):
super().__init__(
globals_dir, support_all=True, support_num=True,
support_timeout=True, support_stats=True
)
self.fzn_cmd = fzn_path
def args(
self, mzn_file, *dzn_files, data=None, include=None, timeout=None,
all_solutions=False, num_solutions=None, output_mode='item', parallel=1,
seed=0, statistics=False, **kwargs
):
"""Solve a FlatZinc problem with OR-tools.
Parameters
----------
mzn_file : str
The path to the fzn file to solve.
Returns
-------
str
The output of the solver in dzn format.
"""
args = [self.fzn_cmd]
if statistics:
args.append('-statistics')
if all_solutions:
args.append('--all_solutions')
if num_solutions is not None:
args += ['--num_solutions', str(num_solutions)]
if parallel != 1:
args += ['--threads', str(parallel)]
if timeout and timeout > 0:
timeout = timeout * 1000 # OR-tools takes milliseconds
args += ['--time_limit', str(timeout)]
if seed != 0:
args += ['--fz_seed', str(seed)]
args.append(mzn_file)
return args
#: Default Gecode instance.
gecode = Gecode()
#: Default Chuffed instance.
chuffed = Chuffed()
#: Default Optimathsat instance.
optimathsat = Optimathsat()
#: Default Opturion instance.
opturion = Opturion()
#: Default Gurobi instance.
gurobi = Gurobi()
#: Default CBC instance.
cbc = CBC()
#: Default G12Fd instance.
g12fd = G12Fd()
#: Default G12Lazy instance.
g12lazy = G12Lazy()
#: Default G12Lazy instance.
g12mip = G12MIP()
#: Default Oscar/CBLS instance.
oscar_cbls = OscarCBLS()
#: Default ORTools instance.
or_tools = ORTools()
|
py | b40de64a7cdbd58441c8d5e71f8c937199cba979 | #!/usr/bin/env python
import argparse
import logging
import os
import glob
import shutil
import sys
import stat
import tempfile
sys.path.append(os.path.join(
os.path.dirname(__file__),
"extension",
"textext"
))
from requirements_check import \
set_logging_levels, \
TexTextRequirementsChecker, \
defaults, \
LoggingColors, \
SUCCESS
from utility import Settings
# taken from https://stackoverflow.com/a/3041990/1741477
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
if sys.version_info[0] > 2:
read_input = input
else:
read_input = raw_input
while True:
sys.stdout.write(question + prompt)
choice = read_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class TemporaryDirectory(object):
""" Mimic tempfile.TemporaryDirectory from python3 """
def __init__(self):
self.dir_name = None
def __enter__(self):
self.dir_name = tempfile.mkdtemp("textext_")
return self.dir_name
def __exit__(self, exc_type, exc_val, exc_tb):
def retry_with_chmod(func, path, exec_info):
os.chmod(path, stat.S_IWRITE)
func(path)
if self.dir_name:
shutil.rmtree(self.dir_name, onerror=retry_with_chmod)
class StashFiles(object):
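# StashFiles copies the files listed in rel_filenames from stash_from into tmp_dir on
# entry, and restores them to unstash_to (defaults to stash_from) on exit. It is used
# below to preserve user-edited files such as textext/config.json and
# textext/default_packages.tex across a reinstallation of the extension.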
def __init__(self, stash_from, rel_filenames, tmp_dir, unstash_to=None):
self.stash_from = stash_from
self.unstash_to = stash_from if unstash_to is None else unstash_to
self.rel_filenames = rel_filenames
self.tmp_dir = tmp_dir
def __enter__(self):
for old_name, new_name in self.rel_filenames.items():
src = os.path.join(self.stash_from, old_name)
dst = os.path.join(self.tmp_dir, old_name)
if os.path.isfile(src):
if not os.path.isdir(os.path.dirname(dst)):
logger.info("Creating directory `%s`" % os.path.dirname(dst) )
os.makedirs(os.path.dirname(dst))
logger.info("Stashing `%s`" % dst)
shutil.copy2(src, dst)
def __exit__(self, exc_type, exc_val, exc_tb):
for old_name, new_name in self.rel_filenames.items():
src = os.path.join(self.tmp_dir, old_name)
dst = os.path.join(self.unstash_to, new_name)
if os.path.isfile(src):
if not os.path.isdir(os.path.dirname(dst)):
logger.info("Creating directory `%s`" % os.path.dirname(dst) )
os.makedirs(os.path.dirname(dst))
logger.info("Restoring old `%s` -> `%s`" % (old_name, dst))
shutil.copy2(src, dst)
class CopyFileOverDirectoryError(RuntimeError):
pass
class CopyFileAlreadyExistsError(RuntimeError):
pass
def copy_extension_files(src, dst, if_already_exists="raise"):
"""
src: glob expresion to copy from
dst: destination directory
if_already_exists: action on existing files. One of "raise" (default), "skip", "overwrite"
"""
if os.path.exists(dst):
if not os.path.isdir(dst):
logger.critical("Can't copy files to `%s`: it's not a directory")
raise CopyFileOverDirectoryError("Can't copy files to `%s`: it's not a directory")
else:
logger.info("Creating directory `%s`" % dst)
os.makedirs(dst)
for file in glob.glob(src):
basename = os.path.basename(file)
destination = os.path.join(dst, basename)
if os.path.exists(destination):
if if_already_exists == "raise":
logger.critical("Can't copy `%s`: `%s` already exists" % (file, destination))
raise CopyFileAlreadyExistsError("Can't copy `%s`: `%s` already exists" % (file, destination))
elif if_already_exists == "skip":
logger.info("Skipping `%s`" % file)
continue
elif if_already_exists == "overwrite":
logger.info("Overwriting `%s`" % destination)
pass
if os.path.isfile(file):
logger.info("Copying `%s` to `%s`" % (file, destination))
shutil.copy(file, destination)
else:
logger.info("Creating directory `%s`" % destination)
if os.path.exists(destination):
if not os.path.isdir(destination):
os.remove(destination)
os.mkdir(destination)
else:
os.mkdir(destination)
copy_extension_files(os.path.join(file, "*"),
destination,
if_already_exists=if_already_exists)
def remove_previous_installation(extension_dir):
previous_installation_files_and_folders = [
"asktext.py",
"default_packages.tex",
"latexlogparser.py",
"textext",
"textext.inx",
"textext.py",
"typesetter.py",
"win_app_paths.py",
]
for file_or_dir in previous_installation_files_and_folders:
file_or_dir = os.path.abspath(os.path.join(extension_dir, file_or_dir))
if os.path.isfile(file_or_dir):
logger.info("Removing `%s`" % file_or_dir)
os.remove(file_or_dir)
elif os.path.isdir(file_or_dir):
logger.info("Removing `%s`" % file_or_dir)
shutil.rmtree(file_or_dir)
else:
logger.debug("`%s` is not found" % file_or_dir)
if __name__ == "__main__":
EXIT_SUCCESS = 0
EXIT_REQUIREMENT_CHECK_UNKNOWN = 64
EXIT_REQUIREMENT_CHECK_FAILED = 65
EXIT_BAD_COMMAND_LINE_ARGUMENT_VALUE = 2
parser = argparse.ArgumentParser(description='Install TexText')
parser.add_argument(
"--inkscape-extensions-path",
default=defaults.inkscape_extensions_path,
type=str,
help="Path to inkscape extensions directory"
)
parser.add_argument(
"--inkscape-executable",
default=None,
type=str,
help="Full path to inkscape executable"
)
parser.add_argument(
"--pdflatex-executable",
default=None,
type=str,
help="Full path to pdflatex executable"
)
parser.add_argument(
"--lualatex-executable",
default=None,
type=str,
help="Full path to lualatex executable"
)
parser.add_argument(
"--xelatex-executable",
default=None,
type=str,
help="Full path to xelatex executable"
)
parser.add_argument(
"--skip-requirements-check",
default=False,
action='store_true',
help="Bypass minimal requirements check"
)
parser.add_argument(
"--skip-extension-install",
default=False,
action='store_true',
help="Don't install extension"
)
parser.add_argument(
"--keep-previous-installation-files",
default=None,
action='store_true',
help="Keep/discard files from previous installation, suppress prompt"
)
parser.add_argument(
"--color",
default=defaults.console_colors,
choices=("always", "never"),
help="Enables/disable console colors"
)
files_to_keep = { # old_name : new_name
"default_packages.tex": "textext/default_packages.tex", # old layout
"textext/default_packages.tex": "textext/default_packages.tex", # new layout
"textext/config.json": "textext/config.json" # new layout
}
args = parser.parse_args()
args.inkscape_extensions_path = os.path.expanduser(args.inkscape_extensions_path)
if args.color == "always":
LoggingColors.enable_colors = True
elif args.color == "never":
LoggingColors.enable_colors = False
set_logging_levels()
logger = logging.getLogger('TexText')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('[%(name)s][%(levelname)6s]: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler("setup.log")
fh.setLevel(ch.level)
fh.setFormatter(formatter)
logger.addHandler(fh)
settings = Settings()
checker = TexTextRequirementsChecker(logger, settings)
for executable_name in [
"inkscape",
"lualatex",
"pdflatex",
"xelatex",
]:
executable_path = getattr(args, "%s_executable" % executable_name)
if executable_path is not None:
if not checker.check_executable(executable_path):
logger.error("Bad `%s` executable provided: `%s`. Abort installation." % (executable_name, executable_path))
exit(EXIT_BAD_COMMAND_LINE_ARGUMENT_VALUE)
settings["%s-executable" % executable_name] = executable_path
if not args.skip_requirements_check:
check_result = checker.check()
if check_result == None:
logger.error("Automatic requirements check is incomplete")
logger.error("Please check requirements list manually and run:")
logger.error(" ".join(sys.argv + ["--skip-requirements-check"]))
exit(EXIT_REQUIREMENT_CHECK_UNKNOWN)
if check_result == False:
logger.error("Automatic requirements check found issue")
logger.error("Follow instruction above and run install script again")
logger.error("To bypass requirement check pass `--skip-requirements-check` to setup.py")
exit(EXIT_REQUIREMENT_CHECK_FAILED)
if not args.skip_extension_install:
if args.keep_previous_installation_files is None:
found_files_to_keep = {}
for old_filename, new_filename in files_to_keep.items():
if not os.path.isfile(os.path.join(args.inkscape_extensions_path, old_filename)):
logger.debug("%s not found" % old_filename)
else:
logger.debug("%s found" % old_filename)
if not os.path.isfile(os.path.join("extension", new_filename)):
logger.info("`%s` is not found in distribution, keep old file" % new_filename)
found_files_to_keep[old_filename] = new_filename
continue
with open(os.path.join(args.inkscape_extensions_path, old_filename)) as f_old, \
open(os.path.join("extension", new_filename)) as f_new:
if f_old.read() != f_new.read():
logger.debug("Content of `%s` are not identical version in distribution" % old_filename)
found_files_to_keep[old_filename] = new_filename
else:
logger.debug("Content of `%s` is identical to distribution" % old_filename)
files_to_keep = {}
if len(found_files_to_keep) > 0:
file_s = "file" if len(found_files_to_keep) == 1 else "files"
for old_filename, new_filename in found_files_to_keep.items():
if os.path.isfile(os.path.join("extension", new_filename)):
logger.warn("Existing `%s` differs from newer version in distribution" % old_filename)
if query_yes_no("Keep `%s` from previous installation?" % old_filename):
files_to_keep[old_filename] = new_filename
args.keep_previous_installation_files = True
else:
args.keep_previous_installation_files = False
if not args.keep_previous_installation_files:
files_to_keep = {}
with TemporaryDirectory() as tmp_dir, \
StashFiles(stash_from=args.inkscape_extensions_path,
rel_filenames=files_to_keep,
tmp_dir=tmp_dir
):
remove_previous_installation(args.inkscape_extensions_path)
copy_extension_files(
src="extension/*",
dst=args.inkscape_extensions_path,
if_already_exists="overwrite"
)
settings.save()
logger.log(SUCCESS, "--> TexText has been SUCCESSFULLY installed on your system <--")
exit(EXIT_SUCCESS)
|
py | b40de653cc740a2292d4fc3e6286326ed03a422e | import inspect
from argparse import ArgumentParser
from torch import nn
from lasaft.source_separation.conditioned.cunet.dcun_gpocm import DenseCUNet_GPoCM, DenseCUNet_GPoCM_Framework
from lasaft.source_separation.conditioned.loss_functions import get_conditional_loss
from lasaft.source_separation.sub_modules.building_blocks import TFC
from lasaft.utils import functions
class DCUN_TFC_GPoCM(DenseCUNet_GPoCM):
def __init__(self,
n_fft,
n_blocks, input_channels, internal_channels, n_internal_layers,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
kernel_size_t, kernel_size_f,
tfc_activation,
control_vector_type, control_input_dim, embedding_dim, condition_to,
control_type, control_n_layer, pocm_type, pocm_norm
):
tfc_activation = functions.get_activation_by_name(tfc_activation)
def mk_tfc(in_channels, internal_channels, f):
return TFC(in_channels, n_internal_layers, internal_channels,
kernel_size_t, kernel_size_f, tfc_activation)
def mk_ds(internal_channels, i, f, t_down_layers):
if t_down_layers is None:
scale = (2, 2)
else:
scale = (2, 2) if i in t_down_layers else (1, 2)
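# scale is (time, frequency): the frequency axis is always halved, while the time
# axis is halved only for the blocks listed in t_down_layers (or in every block
# when t_down_layers is None).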
ds = nn.Sequential(
nn.Conv2d(in_channels=internal_channels, out_channels=internal_channels,
kernel_size=scale, stride=scale),
nn.BatchNorm2d(internal_channels)
)
return ds, f // scale[-1]
def mk_us(internal_channels, i, f, n, t_down_layers):
if t_down_layers is None:
scale = (2, 2)
else:
scale = (2, 2) if i in [n - 1 - s for s in t_down_layers] else (1, 2)
us = nn.Sequential(
nn.ConvTranspose2d(in_channels=internal_channels, out_channels=internal_channels,
kernel_size=scale, stride=scale),
nn.BatchNorm2d(internal_channels)
)
return us, f * scale[-1]
super(DCUN_TFC_GPoCM, self).__init__(
n_fft,
input_channels, internal_channels,
n_blocks, n_internal_layers,
mk_tfc, mk_ds, mk_us,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
# Conditional Mechanism #
control_vector_type, control_input_dim, embedding_dim, condition_to,
control_type, control_n_layer, pocm_type, pocm_norm
)
class DCUN_TFC_GPoCM_Framework(DenseCUNet_GPoCM_Framework):
def __init__(self, n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
optimizer, lr, dev_mode,
train_loss, val_loss,
**kwargs):
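# Filter **kwargs down to the parameters accepted by DCUN_TFC_GPoCM.__init__ so that
# framework-level options (optimizer, losses, etc.) are not passed to the network.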
valid_kwargs = inspect.signature(DCUN_TFC_GPoCM.__init__).parameters
tfc_tdf_net_kwargs = dict((name, kwargs[name]) for name in valid_kwargs if name in kwargs)
tfc_tdf_net_kwargs['n_fft'] = n_fft
conditional_spec2spec = DCUN_TFC_GPoCM(**tfc_tdf_net_kwargs)
train_loss_ = get_conditional_loss(train_loss, n_fft, hop_length, **kwargs)
val_loss_ = get_conditional_loss(val_loss, n_fft, hop_length, **kwargs)
super(DCUN_TFC_GPoCM_Framework, self).__init__(n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
conditional_spec2spec,
optimizer, lr, dev_mode,
train_loss_, val_loss_
)
valid_kwargs = inspect.signature(DCUN_TFC_GPoCM_Framework.__init__).parameters
hp = [key for key in valid_kwargs.keys() if key not in ['self', 'kwargs']]
hp = hp + [key for key in kwargs if not callable(kwargs[key])]
self.save_hyperparameters(*hp)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--n_internal_layers', type=int, default=5)
parser.add_argument('--kernel_size_t', type=int, default=3)
parser.add_argument('--kernel_size_f', type=int, default=3)
parser.add_argument('--tfc_activation', type=str, default='relu')
return DenseCUNet_GPoCM_Framework.add_model_specific_args(parser)
|
py | b40de6a7451746f9568789aa07b43f9ce2d21f21 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 12:33:58 2018
@author: michaelek
"""
import pandas as pd
from pyhydrotel import get_mtypes, get_sites_mtypes, get_ts_data
import detidelevel as dtl
pd.options.display.max_columns = 10
######################################
### Parameters
server = 'sql2012prod05'
database = 'hydrotel'
site = '66401'
mtypes = ['water level', 'water level detided']
from_date = '2018-01-01'
to_date = '2018-03-01'
quantile = 0.3
output_path = r'E:\ecan\shared\projects\de-tide\de-tide_2018-10-16.html'
######################################
### Get data
mtypes1 = get_sites_mtypes(server, database, sites=site)
tsdata = get_ts_data(server, database, mtypes, site, from_date, to_date, None)
tsdata1 = dtl.util.tsreg(tsdata.unstack(1).reset_index().drop(['ExtSiteID'], axis=1).set_index('DateTime')).interpolate('time')
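# Smooth the raw water level with a 12-sample centred rolling mean before detiding.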
roll1 = tsdata1[['water level']].rolling(12, center=True).mean().dropna()
roll1.columns = ['smoothed original']
######################################
### Run detide
det1 = dtl.detide(roll1, quantile)
det2 = dtl.plot.plot_detide(roll1, quantile, output_path=output_path)
|
py | b40de754f9446d4740c918bb8abd3fb731e2f358 | import xml.etree.ElementTree as ET
import os
import csv
import shutil
root_path = 'downloaded_images'
save_path = "images_train"
train_csv = 'train.csv'
with open(train_csv) as f:
reader = csv.reader(f)
i = 0
for row in reader:
if i == 0:
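# The first row is presumably the CSV header, so nothing is copied for it.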
print('dong')
else:
filename_value = row[0]
print(filename_value)
m = shutil.copy(os.path.join(root_path, filename_value), os.path.join(save_path, filename_value))
i = i + 1
|
py | b40de77fe751fc3d2220cef31f07642a84e9e781 | import logging
import sys
from pathlib import Path
import boto3
from igata import settings
from igata.handlers.aws.input.s3 import S3BucketImageInputCtxManager
from tests.utils import setup_teardown_s3_file
# add test root to PATH in order to load dummypredictor
BASE_TEST_DIRECTORY = Path(__file__).absolute().parent.parent.parent
sys.path.append(str(BASE_TEST_DIRECTORY))
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(levelname)s] (%(name)s) %(funcName)s: %(message)s")
logger = logging.getLogger(__name__)
# reduce logging output from noisy packages
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("botocore").setLevel(logging.WARNING)
logging.getLogger("boto3").setLevel(logging.WARNING)
logging.getLogger("pynamodb.connection.base").setLevel(logging.WARNING)
S3 = boto3.client("s3", endpoint_url=settings.S3_ENDPOINT)
TEST_INPUT_SQS_QUEUENAME = "input-test-queue"
TEST_SQS_OUTPUT_QUEUENAME = "output-test-queue"
TEST_BUCKETNAME = "test-bucket-local"
TEST_OUTPUT_BUCKETNAME = "test-output-bucket-local"
TEST_IMAGE_FILENAME = "pacioli-512x512.png"
TEST_IMAGE_FILEPATH = BASE_TEST_DIRECTORY / "data" / "images" / TEST_IMAGE_FILENAME
assert TEST_IMAGE_FILEPATH.exists()
SAMPLE_CSV_FILEPATH = Path(__file__).parent.parent.parent / "data" / "sample.csv"
SAMPLE_CSVGZ_FILEPATH = Path(__file__).parent.parent.parent / "data" / "sample.csv.gz"
TEST_IMAGE_S3URI = f"s3://{TEST_BUCKETNAME}/{TEST_IMAGE_FILENAME}"
class DummyException(Exception):
pass
@setup_teardown_s3_file(local_filepath=TEST_IMAGE_FILEPATH, bucket=TEST_BUCKETNAME, key=TEST_IMAGE_FILENAME)
def test_input_handler_s3bucketimageinputctxmanager():
image_found = False
s3uris = [TEST_IMAGE_S3URI]
input_settings = {}
with S3BucketImageInputCtxManager(**input_settings) as s3images:
for image, info in s3images.get_records(s3uris):
assert image.any()
assert info
image_found = True
assert image_found
|
py | b40de9b2a779019e49dc3a8537aed866c30ed2cb | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render
import youtube_dl
from .forms import DownloadForm
import re
def download_video(request):
global context
form = DownloadForm(request.POST or None)
if form.is_valid():
video_url = form.cleaned_data.get("url")
regex = r'^(http(s)?:\/\/)?((w){3}.)?youtu(be|.be)?(\.com)?\/.+'
#regex = (r"^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$\n")
print(video_url)
if not re.match(regex,video_url):
print('URL did not match the expected YouTube pattern')
return HttpResponse('Enter correct url.')
# if 'm.' in video_url:
# video_url = video_url.replace(u'm.', u'')
# elif 'youtu.be' in video_url:
# video_id = video_url.split('/')[-1]
# video_url = 'https://www.youtube.com/watch?v=' + video_id
# if len(video_url.split("=")[-1]) < 11:
# return HttpResponse('Enter correct url.')
ydl_opts = {}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
meta = ydl.extract_info(
video_url, download=False)
video_audio_streams = []
for m in meta['formats']:
file_size = m['filesize']
if file_size is not None:
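# youtube-dl reports 'filesize' in bytes; convert it to (decimal) megabytes for display.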
file_size = f'{round(int(file_size) / 1000000,2)} mb'
resolution = 'Audio'
if m['height'] is not None:
resolution = f"{m['height']}x{m['width']}"
video_audio_streams.append({
'resolution': resolution,
'extension': m['ext'],
'file_size': file_size,
'video_url': m['url']
})
video_audio_streams = video_audio_streams[::-1]
context = {
'form': form,
'title': meta['title'], 'streams': video_audio_streams,
'description': meta['description'], 'likes': meta['like_count'],
'dislikes': meta['dislike_count'], 'thumb': meta['thumbnails'][3]['url'],
'duration': round(int(meta['duration'])/60, 2), 'views': f'{int(meta["view_count"]):,}'
}
return render(request, 'home.html', context)
return render(request, 'home.html', {'form': form})
|
py | b40dea2e4fad379e836b0cac59310e8feb496cf9 | import asyncio
import discord
import nacre, re
class HelpSession:
def __init__(self, pearl, config):
self.pearl = pearl
self.hangouts = self.pearl.hangouts
self.config = config
self.buildUsage()
self.buildHandle()
def build(self):
pass
def buildUsage(self):
self.usage = "Usage: {} command<br>Commands:".format(self.pearl.config['format'])
for command in self.config['commands']:
self.usage += '<br><b>{}</b>: {}'.format(command, self.config['commands'][command])
self.discUsage = discord.Embed(title=("Commands that I understand:"), color=int("ffdead", 16))
for command in self.config['commands']:
self.discUsage.add_field(name=command, value=self.config['commands'][command], inline=False)
def buildHandle(self):
messageFilter = nacre.handle.newMessageFilter('^{}\s+help(\s.*)?$'.format(self.pearl.config['format']))
async def handle(update):
if nacre.handle.isMessageEvent(update):
event = update.event_notification.event
if messageFilter(event):
await self.respond(event, caller="h")
self.pearl.updateEvent.addListener(handle)
async def respond(self, event, caller=None):
if caller == 'h':
message = self.usage
conversation = self.hangouts.getConversation(event=event)
await self.hangouts.send(message, conversation)
elif caller == 'd':
incoming = re.match('^{}\s+help(\s.*)?.*$'.format(self.pearl.config['format']), event.content)
if not incoming:
return
await self.pearl.embed(self.discUsage, event.channel)
def load(pearl, config):
return HelpSession(pearl, config)
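
# Editor's note (assumptions): load() above looks like the plugin entry point that
# nacre calls with the bot instance and this plugin's config section. A config shaped
# roughly like the following would drive buildUsage(); the command names are
# placeholders:
#
#   config = {'commands': {'help': 'show this message',
#                          'ping': 'check that the bot is alive'}}
#   session = load(pearl, config)   # then answers "<format> help" on Hangouts/Discord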
|
py | b40debc4dc430898d799dd065657f7ab7068acd0 | # -*- coding: utf-8 -*-
# Copyright 2014-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <[email protected]>, 2014-2018
# - Vincent Garonne <[email protected]>, 2017
# - Thomas Beermann <[email protected]>, 2018
# - Hannes Hansen <[email protected]>, 2018-2019
# - Andrew Lister <[email protected]>, 2019
# - Benedikt Ziemons <[email protected]>, 2020
import json
from traceback import format_exc
from flask import Flask, Blueprint, Response, request
from flask.views import MethodView
from rucio.api.heartbeat import list_heartbeats
from rucio.common.exception import RucioException
from rucio.common.utils import APIEncoder
from rucio.web.rest.flaskapi.v1.common import request_auth_env, response_headers, check_accept_header_wrapper_flask
from rucio.web.rest.utils import generate_http_error_flask
class Heartbeat(MethodView):
""" REST API for Heartbeats. """
@check_accept_header_wrapper_flask(['application/json'])
def get(self):
"""
List all heartbeats.
.. :quickref: Heartbeat; List heartbeats.
:resheader Content-Type: application/json
:status 200: OK.
:status 401: Invalid Auth Token.
:status 406: Not Acceptable.
:returns: List of heartbeats.
"""
try:
return Response(json.dumps(list_heartbeats(issuer=request.environ.get('issuer'), vo=request.environ.get('vo')),
cls=APIEncoder), content_type='application/json')
except RucioException as error:
return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
return str(error), 500
def blueprint():
bp = Blueprint('heartbeat', __name__, url_prefix='/heartbeats')
heartbeat_view = Heartbeat.as_view('heartbeat')
bp.add_url_rule('', view_func=heartbeat_view, methods=['get', ])
bp.before_request(request_auth_env)
bp.after_request(response_headers)
return bp
def make_doc():
""" Only used for sphinx documentation """
doc_app = Flask(__name__)
doc_app.register_blueprint(blueprint())
return doc_app
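
# Editor's example (sketch, not part of Rucio): serving the blueprint standalone for a
# quick look at the endpoint; host, port and the token header value are illustrative.
#
#   app = Flask(__name__)
#   app.register_blueprint(blueprint())
#   app.run(host="127.0.0.1", port=5000)
#
#   # then, with a valid token (request_auth_env enforces authentication):
#   # curl -H "X-Rucio-Auth-Token: <token>" http://127.0.0.1:5000/heartbeats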
|
py | b40debdc931d290ada107f51ad17a6da56e41381 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 2019
@author: joao ortigao
"""
from pandas import DataFrame as pd_DataFrame
from pandas import ExcelWriter as pd_ExcelWriter
from os.path import join as pathjoin
from os.path import exists as pathexists
from os.path import isfile
from os import mkdir as osmkdir
from os import getcwd as osgetcwd
from Bio import Entrez
from re import sub
from glob import glob as gb
import xml.etree.ElementTree as ET
parser = ET.XMLParser(encoding="utf-8")
##############################################################################
def CREATE_DIR(OUTDIR):
if not pathexists(pathjoin(OUTDIR)):
osmkdir(pathjoin(OUTDIR))
for DIR in ["IdListDIR","IdListDIR/disease","IdListDIR/query"]:
if not pathexists(pathjoin(OUTDIR,DIR)):
osmkdir(pathjoin(OUTDIR,DIR))
##############################################################################
def MAKE_DICIONARY(DISEASE_LIST):
DISEASES=[]
DISEASES = [line.rstrip('\n') for line in open(DISEASE_LIST)]
CODES = [s.replace(' ', '_') for s in DISEASES]
CODES = [s.replace('\'', '') for s in CODES]
DIC = dict(zip(DISEASES,CODES))
return(DIC)
##############################################################################
def esearch_disease(DISEASE_LIST,OUTDIR):
CREATE_DIR(OUTDIR)
DISEASE_DIC = MAKE_DICIONARY(DISEASE_LIST)
# data frame to store all Counts
# +2 for one extra line for "COUNTS" and "TOTAL1"
df=pd_DataFrame(index=range(0,len(DISEASE_DIC)+2),columns=range(0,8))
df.columns=["disease","COD","QUERY1","QUERY2","QUERY3","QUERY4",\
"QUERY5","TOTAL2"]
COL1=list(DISEASE_DIC); COL1.append('COUNTS'); COL1.append('TOTAL1')
df['disease']=COL1
# data frame to store all the commands used for each search
COMMAND=pd_DataFrame(index=range(0,len(DISEASE_DIC)),columns=range(0,8))
COMMAND.columns=["disease","COD","QUERY1","QUERY2","QUERY3","QUERY4",\
"QUERY5","END"]
COMMAND["disease"]=COL1[0:len(DISEASE_DIC)]
COMMAND["END"]='.'
    # data frame to store the queries' explanations
QUERY_description=pd_DataFrame(index=range(0,5),columns=range(0,1))
QUERY_description.columns=["DESCRIPTION"]
QUERY_description.index=["QUERY1","QUERY2","QUERY3","QUERY4","QUERY5"]
    QUERY1_desc='Searches for the disease name in all fields and filters for'\
        ' gene expression experiments performed with human samples. '\
        'This is the broadest QUERY.'
    QUERY2_desc='Same as QUERY1, but it also searches for "patient" OR '\
        '"patients" in all fields'
    QUERY3_desc='Same as QUERY2, but it also filters for bioprojects '\
        'present in the SRA database'
    QUERY4_desc='Searches for the disease name only in the bioproject title, '\
        'searches for "patient" OR "patients" in all fields and '\
        'filters for gene expression experiments performed with '\
        'human samples'
    QUERY5_desc='Same as QUERY4, but it also filters for bioprojects '\
        'present in the SRA database'
QUERY_description["DESCRIPTION"]=[QUERY1_desc,QUERY2_desc,QUERY3_desc,\
QUERY4_desc,QUERY5_desc]
IdList_QUERY1=[]
IdList_QUERY2=[]
IdList_QUERY3=[]
IdList_QUERY4=[]
IdList_QUERY5=[]
IdList_total=[]
N=0
for DISEASE in list(DISEASE_DIC):
print(str(N)+'\t'+DISEASE)
COD=DISEASE_DIC[DISEASE]
df["COD"][N]=COD
COMMAND["COD"][N]=COD
QUERY_DIC={'1':'("'+DISEASE+'"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])',
'2':'("'+DISEASE+'"[All Fields]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])',
'3':'("'+DISEASE+'"[All Fields]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter]AND"bioproject sra"[Filter])',
'4':'("'+DISEASE+'"[Title]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])',
'5':'("'+DISEASE+'"[Title]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])AND"bioproject sra"[Filter])'}
Idlist_disease=[]
ROUND=['1','2','3','4','5']
for R in ROUND:
QUERY='QUERY'+R
TERM=QUERY_DIC[R]
# COMMAND[locals[QUERY]][N]=TERM
handle = Entrez.esearch(db="bioproject", retmax=1000,
term=TERM)
record = Entrez.read(handle)
handle.close()
if int(record["Count"]) > 1000:
                print('\nATTENTION!\n'+record["Count"]+' bioprojects are '\
                    'related to this esearch and only 1000 will be written '\
                    'to the Idlist for the further analysis.\n\n'+QUERY+\
                    ' for '+DISEASE+'\n\n'+QUERY_DIC[R]+'\n')
exit
            # BUILD THE PER-DISEASE ID LIST
Idlist_disease+=list(record["IdList"])
IdList_total+=list(record["IdList"])
# ADD IDS TO QUERY AND TOTAL LISTS
# IdList_total+=record["IdList"]
if R == '1':
IdList_QUERY1+=list(record["IdList"])
COMMAND['QUERY1'][N]=TERM
df['QUERY1'][N]=int(record["Count"])
elif R == '2':
IdList_QUERY2+=list(record["IdList"])
COMMAND['QUERY2'][N]=TERM
df['QUERY2'][N]=int(record["Count"])
elif R == '3':
IdList_QUERY3+=list(record["IdList"])
COMMAND['QUERY3'][N]=TERM
df['QUERY3'][N]=int(record["Count"])
elif R == '4':
IdList_QUERY4+=list(record["IdList"])
COMMAND['QUERY4'][N]=TERM
df['QUERY4'][N]=int(record["Count"])
elif R == '5':
IdList_QUERY5+=list(record["IdList"])
COMMAND['QUERY5'][N]=TERM
df['QUERY5'][N]=int(record["Count"])
#remove replicates from the list
Idlist_disease=list(set(Idlist_disease))
df['TOTAL2'][N]=len(Idlist_disease)
outfile=pathjoin(OUTDIR,"IdListDIR/disease",COD+".txt")
with open(outfile, 'w') as f:
print( "\n".join(Idlist_disease), file = f)
f.close()
N+=1
    # fill in the totals row
    for COL in list(df)[2:len(df)]: # COL from the third column to the last
df[COL][len(DISEASE_DIC)]=df[COL][0:len(DISEASE_DIC)].sum(axis=0)
    # WRITE THE REMAINING LISTS TO TXT FILES
IdList_total=list(set(IdList_total))
outfile=pathjoin(OUTDIR,"IdListDIR/IdList_total.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_total), file = f)
f.close()
IdList_QUERY1=list(set(IdList_QUERY1))
df.loc[len(DISEASE_DIC)+1,"QUERY1"] = len(IdList_QUERY1)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY1.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY1), file = f)
f.close()
IdList_QUERY2=list(set(IdList_QUERY2))
df.loc[len(DISEASE_DIC)+1,"QUERY2"] = len(IdList_QUERY2)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY2.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY2), file = f)
f.close()
IdList_QUERY3=list(set(IdList_QUERY3))
df.loc[len(DISEASE_DIC)+1,"QUERY3"] = len(IdList_QUERY3)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY3.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY3), file = f)
f.close()
IdList_QUERY4=list(set(IdList_QUERY4))
df.loc[len(DISEASE_DIC)+1,"QUERY4"] = len(IdList_QUERY4)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY4.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY4), file = f)
f.close()
IdList_QUERY5=list(set(IdList_QUERY5))
df.loc[len(DISEASE_DIC)+1,"QUERY5"] = len(IdList_QUERY5)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY5.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY5), file = f)
f.close()
    # WRITE ALL RESULTS TO AN EXCEL FILE
writer = pd_ExcelWriter(pathjoin(OUTDIR,'search_NCBI_RESULT.xlsx'),
engine='xlsxwriter')
df.to_excel(writer, sheet_name='counts')
COMMAND.to_excel(writer, sheet_name='command_lines')
QUERY_description.to_excel(writer, sheet_name='query_description')
writer.save()
return(pathjoin(osgetcwd(),OUTDIR))
##############################################################################
def efetch_found_bioprojects(OUTDIR):
def printProgressBar (iteration, total, prefix = '', suffix = '', \
decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent \
complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}")\
.format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
"""
COLETAR INFORMAÇOES SOBRE BIOPROJECTS ECONTRADOS
"""
if pathexists(OUTDIR):
for DIR in ['Bioprojects','Bioprojects/xml']:
if not pathexists(pathjoin(OUTDIR,DIR)):
osmkdir(pathjoin(OUTDIR,DIR))
path_to_list=pathjoin(OUTDIR,'IdListDIR/IdList_total.txt')
if isfile(path_to_list):
with open(path_to_list,'r') as f:
IdList_total=list(filter(None, f.read().splitlines()))
else:
            print('File '+path_to_list+' was not found. Run esearch_disease() '\
                'first to create it.')
exit()
else:
        print('Directory '+pathjoin(OUTDIR)+' is not accessible. Did you run '\
            'esearch_disease() previously? If not, do it and try again.')
exit()
df2=pd_DataFrame(index=range(0,len(IdList_total)),columns=range(0,7))
df2.columns=["ID","accession","GEO","title","abstract","disease","COD"]
df2["ID"]=IdList_total
print("\n\n") # ESSE PRINT SERVE PARA DISTANCIAR A BARRA DE PROCESSAMENTO
# QUE VEM LOGO ABAIXO DENTRO DO LOOPING
# prepare bar progress
l = len(IdList_total)
i=0
printProgressBar(0, l, prefix = 'Download:', suffix = 'Complete',
length = 50)
RECALL=[] # if download fails, the ID is stored in RECALL
DIC_ID={}
for ID in IdList_total:
try:
handle = Entrez.efetch(db="bioproject", id=ID)
except:
RECALL+=[ID]
print('handle = Entrez.efetch(db="bioproject", id='+ID+')\tFAILED')
continue # avoid catastrophic event in case NCBI fails to give
                     # the information for one ID
try:
record = handle.read()
root = ET.fromstring(record)
DIC = root.find(".//ProjectID/ArchiveID").attrib
DIC_ID[DIC['accession']] = DIC_ID.get(DIC['accession'],DIC['id'])
outfile=pathjoin(OUTDIR,'Bioprojects/xml',DIC['accession']+\
'_'+DIC['id']+'.xml')
#print(outfile)
with open(outfile, "w", encoding="utf-8") as f:
print(record, file = f)
except:
RECALL+=[ID]
print('FAILED to process '+ID+' during the first trial')
continue
printProgressBar(i+1, l, prefix = 'Download:', suffix = 'Complete',
length = 50)
i+=1
# RECALL for failure IDs
if len(RECALL) > 0:
print("\n\nFailure to download IDs. STARTING RECALL.")
l = len(RECALL)
i=0
printProgressBar(0, l, prefix = 'Download:', suffix = 'Complete',
length = 50)
RECALL2=[]
for ID in RECALL:
try:
handle = Entrez.efetch(db="bioproject", id=ID)
except:
RECALL2+=[ID]
print('handle = Entrez.efetch(db="bioproject", id='+ID+')'\
'\tFAILED in RECALL')
continue
try:
record = handle.read()
root = ET.fromstring(record)
DIC = root.find(".//ProjectID/ArchiveID").attrib
DIC_ID[DIC['accession']] = DIC_ID.get(DIC['accession'],DIC['id'])
outfile=pathjoin(OUTDIR,'Bioprojects/xml',DIC['accession']+\
'_'+DIC['id']+'.xml')
#print(outfile)
with open(outfile, "w", encoding="utf-8") as f:
print(record, file = f)
except:
RECALL2+=[ID]
print('FAILED to process '+ID+' during the RECALL')
continue
printProgressBar(i+1, l, prefix = 'RECALL:', suffix = 'Complete',
length = 50)
i+=1
if len(RECALL2) > 0:
outfile=pathjoin(OUTDIR,'Bioprojects/','RECALL_failure.txt')
open(outfile,'w').write(str(RECALL2))
print("It was not possible to get ID even during the RECALL\nYou"\
"can find the problematic IDs on file:\n"+outfile)
outfile=pathjoin(OUTDIR,'Bioprojects/','dict_ID_ACC.txt')
open(outfile,'w').write(str(DIC_ID))
##############################################################################
def collect_XML_file(OUTDIR):
# aqui são importados os xml com a descrição de cada bioproject
files = gb(pathjoin(OUTDIR,'Bioprojects/xml/*.xml'))
df=pd_DataFrame(index=range(0,len(files)),columns=range(0,13))
df.columns=["ID","accession","GEO","title","abstract","disease1",\
"COD1","disease2","COD2","disease3","COD3","disease4","COD4"]
DIC_ID={}
N=0
for file in files:
#with open(file, "r", encoding="utf-8") as f:
#contents = f.read()
#tree = ET.fromstring(contents)
tree = ET.parse(file,parser=ET.XMLParser(encoding="utf-8"))
root = tree.getroot()
try:
GEO = root.find(".//ExternalLink/dbXREF/ID").text
except:
GEO = None # declare empty variable
title = root.find(".//ProjectDescr/Title").text
abstract = root.find(".//ProjectDescr/Description").text
DIC = root.find(".//ProjectID/ArchiveID").attrib
DIC_ID[DIC['accession']] = DIC_ID.get(DIC['accession'],DIC['id'])
accession=DIC['accession']
ID=DIC['id']
for COL in ['ID','accession','GEO','title','abstract']:
df[COL][N]=locals()[COL]
#print(N)
N+=1
return df
##############################################################################
def classify_disease(df2,OUTDIR,DISEASE_LIST):
DATADIR=OUTDIR
COD_DIC = MAKE_DICIONARY(DISEASE_LIST)
COD_DIC = {v: k for k, v in COD_DIC.items()} # invert the dictionary map
files2 = gb(pathjoin(OUTDIR,'IdListDIR/disease/*.txt'))
DISEASE1=[]
DISEASE2=[]
DISEASE3=[]
DISEASE4=[]
for file2 in files2:
COD = sub('.txt','',sub('.*IdListDIR/disease\\\\','',file2))
DISEASE = COD_DIC[COD]
with open(file2,'r') as f:
IDs = filter(None, f.read().splitlines())
f.close()
ROUND=['1','2','3','4']
for ID in IDs:
#print(ID)
for R in ROUND:
if ID not in locals()["DISEASE"+R]:
POS=df2[df2["ID"] == ID].index[0]
df2.loc[[POS],'disease'+R] = DISEASE
df2.loc[[POS],'COD'+R] = COD
locals()["DISEASE"+R].append(ID)
break
return df2
##############################################################################
def writer(df2, OUTDIR):
writer = pd_ExcelWriter(pathjoin(OUTDIR,'db_para_curagem.xlsx'),
engine='xlsxwriter')
df2.to_excel(writer, sheet_name='db_completo_nov18')
writer.save()
df2.to_csv(pathjoin(OUTDIR,'db_para_curagem.tsv'),sep='\t')
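
# Editor's example (not part of the original script): a minimal end-to-end driver under
# stated assumptions -- "diseases.txt" (one disease name per line) and the output
# directory name are placeholders, and NCBI requires a real contact e-mail in
# Entrez.email before any query is sent.
if __name__ == "__main__":
    Entrez.email = "your.name@example.org"  # placeholder address, replace before use
    DISEASE_LIST = "diseases.txt"           # assumed input file
    OUTDIR = "ncbi_search_output"           # assumed output directory
    esearch_disease(DISEASE_LIST, OUTDIR)
    efetch_found_bioprojects(OUTDIR)
    df2 = collect_XML_file(OUTDIR)
    df2 = classify_disease(df2, OUTDIR, DISEASE_LIST)
    writer(df2, OUTDIR)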
|
py | b40dec52b527e266cd5d596e78ddddac28d29742 | #coding: utf-8
import os
import cv2
import numpy as np
from . import cvui
from ._cvpath import PYCHARMERS_OPENCV_VIDEO_DIR
from .editing import resize_aspect
from .video_image_handler import VideoCaptureCreate
from .windows import cv2key2chr
from ..utils.generic_utils import now_str
from ..utils.subprocess_utils import get_monitor_size
from ..utils._colorings import toBLUE
from ..__meta__ import __project_name__
class cv2Project():
"""OpenCV project wrapper with useful GUI tools.
Args:
args (Namespace) : Simple object for storing attributes.
Note:
* Image object ( ``np.ndarray`` ) has the shape ( ``height`` , ``width`` , ``channel`` )
* ``XXX_size`` attributes are formatted as ( ``width`` , ``height`` )
Attributes:
cap (VideoCapture) : VideoCapture (mimic) object. See :meth:`VideoCaptureCreate <pycharmers.opencv.video_image_handler.VideoCaptureCreate>`
monitor (np.ndarray) : Background image. shape= ( ``monitor_height`` , ``monitor_width``, 3)
monitor_height : The height of monitor.
monitor_width : The width of monitor.
original_height (int) : The height of original frame.
original_width (int) : The width of original frame.
frame_height (int) : The height of resized frame.
frame_width (int) : The width of resized frame.
frame_dsize (tuple) : ( ``frame_width`` , ``frame_height`` )
frame_halfsize (tuple) : ( ``frame_width//2`` , ``frame_height//2`` )
gui_x (int) : ``frame_width`` + ``gui_margin``
fps (int) : Frame per seconds.
video (cv2.VideoWriter) : Video Writer.
video_fn (str) : The file name of video.
OtherAttributes:
See :py:class:`cv2ArgumentParser <pycharmers.utils.argparse_utils.cv2ArgumentParser>` .
"""
def __init__(self, args, **kwargs):
self.__dict__.update(args.__dict__)
self.__dict__.update(kwargs)
self.init()
def init(self):
"""Initialize VideoCapture (mimic) object and GUI tools.
Note:
* To run this method, ``self`` must have these attributes.
* winname (str) : Window name.
* path (str) : Path to video or image.
* cam (int) : The ID of the web camera.
* ext (str) : The extension for saved image.
* gui_width (int) : The width of the GUI tools.
* gui_margin (int) : The margin of GUI control tools.
* monitor_size (ListParamProcessor) : Monitor size. ( ``width`` , ``height`` )
* autofit (bool) : Whether to fit display size to window size.
* twitter (bool) : Whether you want to run for tweet. ( ``display_size`` will be () )
* capture (bool) : Whether you want to save as video.
* After run this method, ``self`` will have these attributes.
* cap (VideoCapture) : VideoCapture (mimic) object. See :meth:`VideoCaptureCreate <pycharmers.opencv.video_image_handler.VideoCaptureCreate>`
* monitor (np.ndarray) : Background image. shape= ( ``monitor_height`` , ``monitor_width``, 3)
* monitor_height : The height of monitor.
* monitor_width : The width of monitor.
* original_height (int) : The height of original frame.
* original_width (int) : The width of original frame.
* frame_height (int) : The height of resized frame.
* frame_width (int) : The width of resized frame.
* frame_dsize (tuple) : ( ``frame_width`` , ``frame_height`` )
* frame_halfsize (tuple) : ( ``frame_width//2`` , ``frame_height//2`` )
* gui_x (int) : ``frame_width`` + ``gui_margin``
* fps (int) : Frame per seconds.
* video (cv2.VideoWriter) : Video Writer.
            * video_path (str) : The path of the output video file.
* fn_prefix (str) : The prefix of filename ( ``"" if self.path is None else self.path+"."`` )
"""
cap = VideoCaptureCreate(path=self.path, cam=self.cam)
if self.autofit:
monitor_width, monitor_height = get_monitor_size()
elif self.twitter:
monitor_width, monitor_height = (1300, 733)
else:
monitor_width, monitor_height = self.monitor_size
fn_prefix = "" if self.path is None else self.path+"."
monitor = np.zeros(shape=(monitor_height, monitor_width, 3), dtype=np.uint8)
original_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
original_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height, frame_width = resize_aspect(src=np.zeros(shape=(original_height, original_width, 1), dtype=np.uint8), dsize=(monitor_width-self.gui_width, monitor_height)).shape[:2]
frame_dsize = (frame_width, frame_height)
frame_halfsize = (frame_width//2, frame_height//2)
gui_x = frame_width + self.gui_margin
fps = cap.get(cv2.CAP_PROP_FPS)
video_path = f'{fn_prefix}.{now_str()}.mp4'
video = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc('m','p','4','v'), fps, (monitor_width, monitor_height))
print(f"Created {toBLUE(video_path)}")
cvui.init(windowNames=self.winname, numWindows=1, delayWaitKey=1, createNamedWindows=True)
cv2.moveWindow(winname=self.winname, x=0, y=0)
# NOTE: Register the variables defined here as attributes.
defined_args = locals()
defined_args.pop("self")
self.__dict__.update(defined_args)
def wrap(self, func):
"""Wrap the function.
Args:
func (function) : A function that receives and returns ``frame``.
"""
params = self.__dict__
char = ""
while (True):
self.monitor[:] = self.gui_color
ret, frame = self.cap.read()
if not ret: break
# Wrap the function.
frame = func(frame=frame, **params)
# Recieve the key.
key = cvui.lastKeyPressed()
if key != -1:
char = cv2key2chr(key)
# y = self.frame_height-120
cvui.text(where=self.monitor, x=self.gui_x, y=self.frame_height-120, text=f" Your input: {char}")
# y = self.frame_height-95
if cvui.button(where=self.monitor, x=self.gui_x, y=self.frame_height-95, width=70, height=30, label="&Save", color=(137, 225, 241)):
filename = f'{self.fn_prefix}.{now_str()}{self.ext}'
cv2.imwrite(filename=filename, img=frame)
cv2.imshow(winname=filename, mat=resize_aspect(cv2.imread(filename), dsize=self.frame_halfsize))
print(f"Saved {toBLUE(filename)}")
if cvui.button(where=self.monitor, x=self.gui_x+80, y=self.frame_height-95, width=80, height=30, label="&Stop" if self.capture else "&Capture", color=(110, 93, 211) if self.capture else (177, 163, 121)):
self.capture = not self.capture
# y = self.frame_height-60
if key == cvui.ESCAPE or cvui.button(where=self.monitor, x=self.gui_x+105, y=self.frame_height-60, width=55, height=30, label="&Quit", color=(128, 95, 159)):
break
if cvui.button(where=self.monitor, x=self.gui_x, y=self.frame_height-60, width=95, height=30, label="&FullScreen", color=(116, 206, 173)):
cv2.setWindowProperty(
winname=self.winname,
prop_id=cv2.WND_PROP_FULLSCREEN,
prop_value=1-cv2.getWindowProperty(
winname=self.winname,
prop_id=cv2.WND_PROP_FULLSCREEN,
)
)
# y = self.frame_height-20
cvui.text(where=self.monitor, x=self.gui_x, y=self.frame_height-20, text=__project_name__)
cvui.beginRow(where=self.monitor, x=0, y=0)
cvui.image(image=cv2.resize(src=frame, dsize=self.frame_dsize))
cvui.endRow()
cvui.update()
cv2.imshow(self.winname, self.monitor)
if self.capture:
self.video.write(self.monitor)
self.release()
def release(self):
"""Do the necessary processing at the end"""
cv2.destroyAllWindows()
self.cap.release()
if os.path.getsize(self.video_path) <= 1000:
os.remove(self.video_path)
print(f"Deleted {toBLUE(self.video_path)} (because you didn't capture the window)") |
py | b40dec92d0092c3f81dae609fc7f6395bf44a343 | """Sopel Plugins Command Line Interface (CLI): ``sopel-plugins``"""
from __future__ import generator_stop
import argparse
import inspect
import operator
from sopel import config, plugins, tools
from . import utils
ERR_CODE = 1
"""Error code: program exited with an error"""
def build_parser():
"""Configure an argument parser for ``sopel-plugins``.
:return: the argument parser
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(
description='Sopel plugins tool')
# Subparser: sopel-plugins <sub-parser> <sub-options>
subparsers = parser.add_subparsers(
help='Action to perform',
dest='action')
# sopel-plugins show <name>
show_parser = subparsers.add_parser(
'show',
formatter_class=argparse.RawTextHelpFormatter,
help="Show plugin details",
description="Show detailed information about a plugin.")
utils.add_common_arguments(show_parser)
show_parser.add_argument('name', help='Plugin name')
# sopel-plugins configure <name>
config_parser = subparsers.add_parser(
'configure',
formatter_class=argparse.RawTextHelpFormatter,
help="Configure plugin with a config wizard",
description=inspect.cleandoc("""
Run a config wizard to configure a plugin.
This can be used whether the plugin is enabled or not.
"""))
utils.add_common_arguments(config_parser)
config_parser.add_argument('name', help='Plugin name')
# sopel-plugins list
list_parser = subparsers.add_parser(
'list',
formatter_class=argparse.RawTextHelpFormatter,
help="List available Sopel plugins",
description=inspect.cleandoc("""
List available Sopel plugins from all possible sources.
Plugin sources are: built-in, from ``sopel_modules.*``,
from ``sopel.plugins`` entry points, or Sopel's plugin directories.
Enabled plugins are displayed in green; disabled, in red.
"""))
utils.add_common_arguments(list_parser)
list_parser.add_argument(
'-C', '--no-color',
help='Disable colors',
dest='no_color',
action='store_true',
default=False)
list_enable = list_parser.add_mutually_exclusive_group(required=False)
list_enable.add_argument(
'-e', '--enabled-only',
help='Display only enabled plugins',
dest='enabled_only',
action='store_true',
default=False)
list_enable.add_argument(
'-d', '--disabled-only',
help='Display only disabled plugins',
dest='disabled_only',
action='store_true',
default=False)
list_parser.add_argument(
'-n', '--name-only',
help='Display only plugin names',
dest='name_only',
action='store_true',
default=False)
# sopel-plugins disable
disable_parser = subparsers.add_parser(
'disable',
formatter_class=argparse.RawTextHelpFormatter,
help="Disable a Sopel plugins",
description=inspect.cleandoc("""
Disable a Sopel plugin by its name, no matter where it comes from.
It is not possible to disable the ``coretasks`` plugin.
"""))
utils.add_common_arguments(disable_parser)
disable_parser.add_argument(
'names', metavar='name', nargs='+',
help=inspect.cleandoc("""
Name of the plugin to disable.
Can be used multiple times to disable multiple plugins at once.
In case of error, configuration is not modified.
"""))
disable_parser.add_argument(
'-f', '--force', action='store_true', default=False,
help=inspect.cleandoc("""
Force exclusion of the plugin.
When ``core.enable`` is defined, a plugin may be disabled without
being excluded. In this case, use this option to force
its exclusion.
"""))
disable_parser.add_argument(
'-r', '--remove', action='store_true', default=False,
help="Remove from ``core.enable`` list if applicable.")
# sopel-plugins enable
enable_parser = subparsers.add_parser(
'enable',
formatter_class=argparse.RawTextHelpFormatter,
help="Enable a Sopel plugin",
description=inspect.cleandoc("""
Enable a Sopel plugin by its name, no matter where it comes from.
The ``coretasks`` plugin is always enabled.
By default, a plugin that is not excluded is enabled, unless at
least one plugin is defined in the ``core.enable`` list.
In that case, Sopel uses an "allow-only" policy for plugins, and
all desired plugins must be added to this list.
"""))
utils.add_common_arguments(enable_parser)
enable_parser.add_argument(
'names', metavar='name', nargs='+',
help=inspect.cleandoc("""
Name of the plugin to enable.
Can be used multiple times to enable multiple plugins at once.
In case of error, configuration is not modified.
"""))
enable_parser.add_argument(
'-a', '--allow-only',
dest='allow_only',
action='store_true',
default=False,
help=inspect.cleandoc("""
Enforce allow-only policy.
It makes sure the plugin is added to the ``core.enable`` list.
"""))
return parser
def handle_list(options):
"""List Sopel plugins.
:param options: parsed arguments
:type options: :class:`argparse.Namespace`
:return: 0 if everything went fine
"""
settings = utils.load_settings(options)
no_color = options.no_color
name_only = options.name_only
enabled_only = options.enabled_only
disabled_only = options.disabled_only
# get usable plugins
items = (
(name, info[0], info[1])
for name, info in plugins.get_usable_plugins(settings).items()
)
items = (
(name, plugin, is_enabled)
for name, plugin, is_enabled in items
)
# filter on enabled/disabled if required
if enabled_only:
items = (
(name, plugin, is_enabled)
for name, plugin, is_enabled in items
if is_enabled
)
elif disabled_only:
items = (
(name, plugin, is_enabled)
for name, plugin, is_enabled in items
if not is_enabled
)
# sort plugins
items = sorted(items, key=operator.itemgetter(0))
for name, plugin, is_enabled in items:
description = {
'name': name,
'status': 'enabled' if is_enabled else 'disabled',
}
# optional meta description from the plugin itself
try:
plugin.load()
description.update(plugin.get_meta_description())
# colorize name for display purpose
if not no_color:
if is_enabled:
description['name'] = utils.green(name)
else:
description['name'] = utils.red(name)
except Exception as error:
label = ('%s' % error) or 'unknown loading exception'
error_status = 'error'
description.update({
'label': 'Error: %s' % label,
'type': 'unknown',
'source': 'unknown',
'status': error_status,
})
if not no_color:
if is_enabled:
# yellow instead of green
description['name'] = utils.yellow(name)
else:
# keep it red for disabled plugins
description['name'] = utils.red(name)
description['status'] = utils.red(error_status)
template = '{name}/{type} {label} ({source}) [{status}]'
if name_only:
template = '{name}'
print(template.format(**description))
return 0 # successful operation
def handle_show(options):
"""Show plugin details.
:param options: parsed arguments
:type options: :class:`argparse.Namespace`
:return: 0 if everything went fine;
1 if the plugin doesn't exist or can't be loaded
"""
plugin_name = options.name
settings = utils.load_settings(options)
usable_plugins = plugins.get_usable_plugins(settings)
# plugin does not exist
if plugin_name not in usable_plugins:
tools.stderr('No plugin named %s' % plugin_name)
return ERR_CODE
plugin, is_enabled = usable_plugins[plugin_name]
description = {
'name': plugin_name,
'status': 'enabled' if is_enabled else 'disabled',
}
# optional meta description from the plugin itself
loaded = False
try:
plugin.load()
description.update(plugin.get_meta_description())
loaded = True
except Exception as error:
label = ('%s' % error) or 'unknown loading exception'
error_status = 'error'
description.update({
'label': 'Error: %s' % label,
'type': 'unknown',
'source': 'unknown',
'status': error_status,
})
print('Plugin:', description['name'])
print('Status:', description['status'])
print('Type:', description['type'])
print('Source:', description['source'])
print('Label:', description['label'])
if not loaded:
print('Loading failed')
return ERR_CODE
print('Loaded successfully')
print('Setup:', 'yes' if plugin.has_setup() else 'no')
print('Shutdown:', 'yes' if plugin.has_shutdown() else 'no')
print('Configure:', 'yes' if plugin.has_configure() else 'no')
return 0 # successful operation
def handle_configure(options):
"""Configure a Sopel plugin with a config wizard.
:param options: parsed arguments
:type options: :class:`argparse.Namespace`
:return: 0 if everything went fine;
1 if the plugin doesn't exist or can't be loaded
"""
plugin_name = options.name
settings = utils.load_settings(options)
usable_plugins = plugins.get_usable_plugins(settings)
# plugin does not exist
if plugin_name not in usable_plugins:
tools.stderr('No plugin named %s' % plugin_name)
return ERR_CODE
plugin, is_enabled = usable_plugins[plugin_name]
try:
plugin.load()
except Exception as error:
tools.stderr('Cannot load plugin %s: %s' % (plugin_name, error))
return ERR_CODE
if not plugin.has_configure():
tools.stderr('Nothing to configure for plugin %s' % plugin_name)
return 0 # nothing to configure is not exactly an error case
print('Configure %s' % plugin.get_label())
try:
plugin.configure(settings)
except KeyboardInterrupt:
tools.stderr(
'\nOperation cancelled; the config file has not been modified.')
return ERR_CODE # cancelled operation
settings.save()
if not is_enabled:
tools.stderr(
"Plugin {0} has been configured but is not enabled. "
"Use 'sopel-plugins enable {0}' to enable it".format(plugin_name)
)
return 0 # successful operation
def _handle_disable_plugin(settings, plugin_name, force):
excluded = settings.core.exclude
# nothing left to do if already excluded
if plugin_name in excluded:
tools.stderr('Plugin %s already disabled.' % plugin_name)
return False
# recalculate state: at the moment, the plugin is not in the excluded list
# however, with ensure_remove, the enable list may be empty, so we have
# to compute the plugin's state here, and not use what comes from
# plugins.get_usable_plugins
is_enabled = (
not settings.core.enable or
plugin_name in settings.core.enable
)
# if not enabled at this point, exclude if options.force is used
if not is_enabled and not force:
tools.stderr(
'Plugin %s is disabled but not excluded; '
'use -f/--force to force its exclusion.'
% plugin_name)
return False
settings.core.exclude = excluded + [plugin_name]
return True
def display_unknown_plugins(unknown_plugins):
"""Print an error message when one or more plugins are unknown.
:param list unknown_plugins: list of unknown plugins
"""
# at least one of the plugins does not exist
tools.stderr(utils.get_many_text(
unknown_plugins,
one='No plugin named {item}.',
two='No plugin named {first} or {second}.',
many='No plugin named {left}, or {last}.'
))
def handle_disable(options):
"""Disable Sopel plugins.
:param options: parsed arguments
:type options: :class:`argparse.Namespace`
:return: 0 if everything went fine;
1 if the plugin doesn't exist,
or if attempting to disable coretasks (required)
"""
plugin_names = options.names
force = options.force
ensure_remove = options.remove
settings = utils.load_settings(options)
usable_plugins = plugins.get_usable_plugins(settings)
actually_disabled = []
# coretasks is sacred
if 'coretasks' in plugin_names:
tools.stderr('Plugin coretasks cannot be disabled.')
return ERR_CODE # do nothing and return an error code
unknown_plugins = [
name
for name in plugin_names
if name not in usable_plugins
]
if unknown_plugins:
display_unknown_plugins(unknown_plugins)
return ERR_CODE # do nothing and return an error code
# remove from enabled if asked
if ensure_remove:
settings.core.enable = [
name
for name in settings.core.enable
if name not in plugin_names
]
settings.save()
# disable plugin (when needed)
actually_disabled = tuple(
name
for name in plugin_names
if _handle_disable_plugin(settings, name, force)
)
# save if required
if actually_disabled:
settings.save()
else:
return 0 # nothing to disable or save, but not an error case
# display plugins actually disabled by the command
print(utils.get_many_text(
actually_disabled,
one='Plugin {item} disabled.',
two='Plugins {first} and {second} disabled.',
many='Plugins {left}, and {last} disabled.'
))
return 0
def _handle_enable_plugin(settings, usable_plugins, plugin_name, allow_only):
enabled = settings.core.enable
excluded = settings.core.exclude
# coretasks is sacred
if plugin_name == 'coretasks':
tools.stderr('Plugin coretasks is always enabled.')
return False
# is it already enabled, but should we enforce anything?
is_enabled = usable_plugins[plugin_name][1]
if is_enabled and not allow_only:
# already enabled, and no allow-only option: all good
if plugin_name in enabled:
tools.stderr('Plugin %s is already enabled.' % plugin_name)
else:
# suggest to use --allow-only option
tools.stderr(
'Plugin %s is enabled; '
'use option -a/--allow-only to enforce allow only policy.'
% plugin_name)
return False
# not enabled, or option allow_only to enforce
if plugin_name in excluded:
# remove from excluded
settings.core.exclude = [
name
for name in settings.core.exclude
if plugin_name != name
]
elif plugin_name in enabled:
# not excluded, and already in enabled list: all good
tools.stderr('Plugin %s is already enabled' % plugin_name)
return False
if plugin_name not in enabled and (enabled or allow_only):
# not excluded, but not enabled either: allow-only mode required
# either because of the current configuration, or by request
settings.core.enable = enabled + [plugin_name]
return True
def handle_enable(options):
"""Enable a Sopel plugin.
:param options: parsed arguments
:type options: :class:`argparse.Namespace`
:return: 0 if everything went fine;
1 if the plugin doesn't exist
"""
plugin_names = options.names
allow_only = options.allow_only
settings = utils.load_settings(options)
usable_plugins = plugins.get_usable_plugins(settings)
# plugin does not exist
unknown_plugins = [
name
for name in plugin_names
if name not in usable_plugins
]
if unknown_plugins:
display_unknown_plugins(unknown_plugins)
return ERR_CODE # do nothing and return an error code
actually_enabled = tuple(
name
for name in plugin_names
if _handle_enable_plugin(settings, usable_plugins, name, allow_only)
)
# save if required
if actually_enabled:
settings.save()
else:
        return 0  # nothing to enable or save, but not an error case
    # display plugins actually enabled by the command
print(utils.get_many_text(
actually_enabled,
one='Plugin {item} enabled.',
two='Plugins {first} and {second} enabled.',
many='Plugins {left}, and {last} enabled.'
))
return 0
def main():
"""Console entry point for ``sopel-plugins``."""
parser = build_parser()
options = parser.parse_args()
action = options.action
if not action:
parser.print_help()
return ERR_CODE
try:
if action == 'list':
return handle_list(options)
elif action == 'show':
return handle_show(options)
elif action == 'configure':
return handle_configure(options)
elif action == 'disable':
return handle_disable(options)
elif action == 'enable':
return handle_enable(options)
except KeyboardInterrupt:
tools.stderr('Bye!')
return ERR_CODE
except config.ConfigurationNotFound as err:
tools.stderr(err)
tools.stderr('Use `sopel-config init` to create a new config file.')
return ERR_CODE
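
# Editor's note: example invocations of the CLI defined above (plugin names are
# placeholders):
#
#   sopel-plugins list --enabled-only
#   sopel-plugins show coretasks
#   sopel-plugins configure someplugin
#   sopel-plugins disable someplugin --force
#   sopel-plugins enable someplugin --allow-only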
|
py | b40dee4c88e22265fa70d255caf1b0ff50420d07 | # Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from python_code.helper.binary_trees import generate_binary_tree, print_binary_tree
from solution import Solution
def main():
solution = Solution()
###########################################################
root1 = generate_binary_tree([1, 3, 2, 5])
root2 = generate_binary_tree([2, 1, 3, None, 4, None, 7])
print_binary_tree(solution.merge_trees_iteration(root1, root2), is_root=True)
root1 = generate_binary_tree([1])
root2 = generate_binary_tree([1, 2])
print_binary_tree(solution.merge_trees_iteration(root1, root2), is_root=True)
if __name__ == '__main__':
main()
|
py | b40dee7b29f817a7ae0b1f188136df6e1a00a506 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolStopResizeOptions(Model):
"""Additional parameters for the Pool_stop_resize operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. If not specified, this
header will be automatically populated with the current system clock time.
:type ocp_date: datetime
    :param if_match: An ETag value associated with the version of the resource
     known to the client. Specify this header to perform the operation only if
     the resource's current ETag is an exact match of the specified value.
    :type if_match: str
    :param if_none_match: An ETag value associated with the version of the
     resource known to the client. Specify this header to perform the operation
     only if the resource's current ETag does not match the specified value.
    :type if_none_match: str
:param if_modified_since: Specify this header to perform the operation
only if the resource has been modified since the specified date/time.
:type if_modified_since: datetime
:param if_unmodified_since: Specify this header to perform the operation
only if the resource has not been modified since the specified date/time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
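
# Editor's example (illustrative values; the GUID is the sample shown in the docstring):
#
#   options = PoolStopResizeOptions(
#       timeout=30,
#       client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
#       return_client_request_id=True)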
|
py | b40deee19f16bdaa8d09d457f686a4d19fbe743a | import os
import cv2
import sys
import pdb
import math
import time
import torch
import torch.nn
import torch.nn.functional as F
from pytracking import TensorList
from pytracking.features import augmentation
from pytracking.tracker.base import BaseTracker
from pytracking.utils.plotting import show_tensor
from pytracking.features.preprocessing import sample_patch
from pytracking.features.preprocessing import numpy_to_torch
class SBDT(BaseTracker):
def initialize_model(self):
'''
if not getattr(self, 'model_initialized', False):
self.params.model.initialize()
self.model_initialized = True
'''
        self.params.model.initialize() # to reproduce the VOT result
# ------ MAIN INITIALIZE ------#
def initialize(self, image, state, *args, **kwargs):
self.frame_num = 1
# For debug show only
#image_show = image.copy()
# Fix random seed
torch.manual_seed(1024)
torch.cuda.manual_seed_all(1024)
# Initialize features
self.initialize_model()
# Get position and size (y, x, h, w)
self.target_pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])
self.target_sz = torch.Tensor([state[3], state[2]])
self.initial_target_sz = self.target_sz.clone()
# Set target scale and base target size (N)
self.img_sample_sz = torch.Tensor([math.sqrt(self.params.img_sample_area)]) * torch.ones(2)
self.target_sample_area = self.params.img_sample_area / self.params.search_area_scale**2
# Get initial search area, sample scale ratio and target size in sample image
self.search_area = torch.prod(self.target_sz * self.params.search_area_scale)
self.sample_scale = torch.sqrt(self.search_area / self.params.img_sample_area)
self.target_sample_sz = self.target_sz / self.sample_scale
# Generate centers of proposals for locator (N)
self.proposals_xc, self.proposals_yc = self.init_proposal_centers_function()
# Generate labels for locator (N)
self.labels = self.init_labels_function()
assert(self.labels.max().item()==1.0)
        # Create output score window (N)
self.output_window = None
if getattr(self.params, 'window_output', True):
self.output_window = self.init_output_window_function()
# Setup scale bounds (N)
self.min_scale_factor = self.params.min_scale_factor
self.max_scale_factor = self.params.max_scale_factor
# Extract initial transform samples
im_tensor = numpy_to_torch(image)
train_samples = self.generate_init_samples(im_tensor, self.target_pos, self.sample_scale).cuda()
# Setup scale bounds (Martin)
self.image_sz = torch.Tensor([im_tensor.shape[2], im_tensor.shape[3]])
self.min_scale_factor = torch.max(10 / self.initial_target_sz)
self.max_scale_factor = torch.min(self.image_sz / self.initial_target_sz)
# Generate initial proposals for locator
batch_size = train_samples.shape[0]
init_proposals = self.get_locator_proposals(self.target_sample_sz)
init_proposals = init_proposals.repeat(batch_size,1,1)
# Feature Extract
self.params.model.extract(train_samples, init_proposals)
# Initialize iounet
self.init_iou_net(self.target_pos, self.target_sz, self.sample_scale)
# Initialize locator features
self.initial_locator_features = self.params.model.locator_features.clone().mean(dim=0)
self.locator_features_model = self.params.model.locator_features.clone().mean(dim=0)
# Train locator model
self.regularization_matrix = None
self.locator_model = self.train_locator_model(self.locator_features_model)
        # Initialize the hard-negative sample detection region
self.hard_negative_region_mask = self.init_hard_negative_region_function()
        # Initialize the weight of the first frame
self.current_initial_frame_weight = 1.0
# Output result image
#self.output_result_image(image_show, state)
# ------ MAIN TRACK ------#
def track(self, image):
self.frame_num += 1
# For debug show only
#image_show = image.copy()
        # Convert to tensor and move to GPU
image_cuda = self.numpy_to_tensor_gpu(image)
# ------- LOCALIZATION ------- #
sample_pos = self.target_pos.clone()
sample_scale = self.sample_scale.clone()
target_sample_sz = self.target_sample_sz.clone()
# sample and extract features
test_sample = sample_patch(image_cuda, sample_pos, sample_scale*self.img_sample_sz, self.img_sample_sz)
test_locator_proposals = self.get_locator_proposals(target_sample_sz)
self.params.model.extract(test_sample, test_locator_proposals)
        # calculate the localization score
test_locator_score = torch.mm(self.params.model.locator_features, self.locator_model)
if getattr(self.params, 'window_output', False):
test_locator_score = test_locator_score * self.output_window
max_score, max_id = torch.max(test_locator_score, dim=0)
max_score, max_id = max_score.item(), max_id.item()
# when target not found
if max_score < self.params.target_not_found_threshold:
# maintain the original target position and size
new_state = torch.cat((self.target_pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))
# Output result image
#self.output_result_image(image_show, new_state)
return new_state.tolist()
# update the target position
self.target_pos[0] = self.target_pos[0] + (self.proposals_yc[max_id].item() - self.img_sample_sz[1]*0.5) * sample_scale
self.target_pos[1] = self.target_pos[1] + (self.proposals_xc[max_id].item() - self.img_sample_sz[0]*0.5) * sample_scale
# refine the target position and size by IoUNet
new_pos, new_target_sz = self.refine_target_box(self.target_pos, self.target_sz, sample_pos, sample_scale)
        # bound the target size
if new_target_sz is not None:
new_target_sz = torch.min(new_target_sz, self.initial_target_sz*self.max_scale_factor)
new_target_sz = torch.max(new_target_sz, self.initial_target_sz*self.min_scale_factor)
        # update the target state and sampling parameters
if new_pos is not None:
self.target_pos = new_pos.clone()
self.target_sz = new_target_sz.clone()
self.search_area = torch.prod(self.target_sz * self.params.search_area_scale)
self.sample_scale = torch.sqrt(self.search_area / self.params.img_sample_area)
self.target_sample_sz = self.target_sz / self.sample_scale
# Return new state
new_state = torch.cat((self.target_pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))
# Output result image
#self.output_result_image(image_show, new_state)
        # ------- UPDATE MODEL ------- #
train_sample = sample_patch(image_cuda, self.target_pos, self.sample_scale*self.img_sample_sz, self.img_sample_sz)
train_locator_proposals = self.get_locator_proposals(self.target_sample_sz)
self.params.model.extract(train_sample, train_locator_proposals, only_locator=True)
hard_flag = False
if self.params.hard_negative_mining:
train_locator_score = torch.mm(self.params.model.locator_features, self.locator_model)
train_locator_score = train_locator_score * self.hard_negative_region_mask
max_score, _ = torch.max(train_locator_score, dim=0)
if max_score > self.params.hard_negative_threshold:
hard_flag = True
if hard_flag:
learning_rate = self.params.hard_negative_learning_rate
else:
learning_rate = self.params.learning_rate
self.locator_features_model = (1 - learning_rate) * self.locator_features_model + learning_rate * self.params.model.locator_features
self.current_initial_frame_weight = (1 - learning_rate) * self.current_initial_frame_weight
if self.current_initial_frame_weight < self.params.init_samples_minimum_weight:
diff = self.params.init_samples_minimum_weight - self.current_initial_frame_weight
coff = diff / (1 - self.current_initial_frame_weight)
self.locator_features_model = (1 - coff) * self.locator_features_model + coff * self.initial_locator_features
self.current_initial_frame_weight = self.params.init_samples_minimum_weight
if (self.frame_num % self.params.train_skipping == 0) or (hard_flag):
self.locator_model = self.train_locator_model(self.locator_features_model, self.locator_model)
return new_state.tolist()
def numpy_to_tensor_gpu(self, image):
image = torch.from_numpy(image)
image = image.cuda()
image = image.permute(2,0,1).unsqueeze(0).to(torch.float32)
return image
def init_proposal_centers_function(self):
search_area_scale = self.params.search_area_scale
num_proposals = self.params.num_proposals_locator
num_proposals_sqrt = int(math.sqrt(num_proposals))
WIDTH, HEIGHT = self.img_sample_sz[0], self.img_sample_sz[1]
x_step = ((WIDTH - WIDTH/search_area_scale) / (num_proposals_sqrt-1))
y_step = ((HEIGHT - HEIGHT/search_area_scale) / (num_proposals_sqrt-1))
proposals_xc = torch.arange(num_proposals_sqrt).repeat(num_proposals_sqrt).type(torch.float32)
proposals_yc = torch.arange(num_proposals_sqrt).repeat(num_proposals_sqrt,1).t().reshape(-1).type(torch.float32)
proposals_xc = proposals_xc * x_step + WIDTH/(search_area_scale*2)
proposals_yc = proposals_yc * y_step + HEIGHT/(search_area_scale*2)
proposals_xc = proposals_xc.to(self.params.device)
proposals_yc = proposals_yc.to(self.params.device)
return proposals_xc, proposals_yc
def init_labels_function(self):
proposals_xc = self.proposals_xc
proposals_yc = self.proposals_yc
sigma_factor = self.params.output_sigma_factor
target_sample_area = self.target_sample_area
WIDTH, HEIGHT = self.img_sample_sz[0], self.img_sample_sz[1]
x_dist = proposals_xc - (WIDTH * 0.5).item()
y_dist = proposals_yc - (HEIGHT * 0.5).item()
sigma = sigma_factor * math.sqrt(target_sample_area)
labels = torch.exp(-0.5 * (x_dist.pow(2)+y_dist.pow(2))/sigma**2)
labels = labels.to(self.params.device).reshape(-1,1)
return labels
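
    # Editor's note: the labels above are an isotropic Gaussian over the proposal
    # centres, exp(-(dx^2 + dy^2) / (2 * sigma^2)) with
    # sigma = output_sigma_factor * sqrt(target_sample_area); the assert in
    # initialize() expects the proposal grid to contain the exact sample centre,
    # where the label reaches 1.0.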
def init_output_window_function(self):
proposals_xc = self.proposals_xc
proposals_yc = self.proposals_yc
window_min = self.params.window_min
sigma_factor = self.params.window_sigma_factor
target_sample_area = self.target_sample_area
WIDTH, HEIGHT = self.img_sample_sz[0], self.img_sample_sz[1]
x_dist = proposals_xc - (WIDTH * 0.5).item()
y_dist = proposals_yc - (HEIGHT * 0.5).item()
sigma = sigma_factor * math.sqrt(target_sample_area)
output_window = torch.exp(-0.5 * (x_dist.pow(2)+y_dist.pow(2))/sigma**2)
output_window = output_window.clamp(window_min)
output_window = output_window.to(self.params.device).reshape(-1,1)
return output_window
def init_hard_negative_region_function(self):
proposals_xc = self.proposals_xc
proposals_yc = self.proposals_yc
img_sample_area = self.params.img_sample_area
distance_ratio = self.params.hard_negative_distance_ratio
region_mask = torch.zeros(proposals_xc.shape, device=self.params.device)
x_distance = proposals_xc - (self.img_sample_sz[0] * 0.5).item()
y_distance = proposals_yc - (self.img_sample_sz[1] * 0.5).item()
distance = torch.sqrt(x_distance.pow(2)+y_distance.pow(2))
distance_threshold = math.sqrt(img_sample_area * distance_ratio**2)
region_mask[distance>distance_threshold] = 1.0
region_mask = region_mask.view(-1,1)
return region_mask
def generate_init_samples(self, im: torch.Tensor, target_pos, sample_scale) -> TensorList:
"""Generate augmented initial samples."""
# Compute augmentation size
aug_expansion_factor = getattr(self.params, 'augmentation_expansion_factor', None)
aug_expansion_sz = self.img_sample_sz.clone()
aug_output_sz = None
if aug_expansion_factor is not None and aug_expansion_factor != 1:
aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()
aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2
aug_expansion_sz = aug_expansion_sz.float()
aug_output_sz = self.img_sample_sz.long().tolist()
# Random shift operator
get_rand_shift = lambda: None
        # Create transformations
self.transforms = [augmentation.Identity(aug_output_sz)]
if 'shift' in self.params.augmentation:
self.transforms.extend([augmentation.Translation(shift, aug_output_sz) for shift in self.params.augmentation['shift']])
if 'relativeshift' in self.params.augmentation:
get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()
self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz) for shift in self.params.augmentation['relativeshift']])
if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:
self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
if 'blur' in self.params.augmentation:
self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in self.params.augmentation['blur']])
if 'scale' in self.params.augmentation:
self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in self.params.augmentation['scale']])
if 'rotate' in self.params.augmentation:
self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in self.params.augmentation['rotate']])
init_sample = sample_patch(im, target_pos, sample_scale*aug_expansion_sz, aug_expansion_sz)
init_samples = torch.cat([T(init_sample) for T in self.transforms])
if not self.params.use_augmentation:
init_samples = init_samples[0:1,...]
return init_samples
def init_iou_net(self, target_pos, target_sz, sample_scale):
# Setup IoU net
self.iou_predictor = self.params.model.iou_predictor
for p in self.iou_predictor.parameters():
p.requires_grad = False
# Get target boxes and convert
target_boxes = self.get_iounet_box(target_pos, target_sz, target_pos.round(), sample_scale)
target_boxes = target_boxes.unsqueeze(0).to(self.params.device)
# Get iou backbone features
iou_backbone_features = self.params.model.iounet_backbone_features
# Remove other augmentations such as rotation
iou_backbone_features = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_features])
# Extract target IoU feat
with torch.no_grad():
target_iou_feat = self.iou_predictor.get_filter(iou_backbone_features, target_boxes)
self.target_iou_feat = TensorList([x.detach().mean(0) for x in target_iou_feat])
def get_iounet_box(self, target_pos, target_sz, sample_pos, sample_scale):
"""All inputs in original image coordinates"""
box_center = (target_pos - sample_pos) / sample_scale + (self.img_sample_sz - 1) / 2
box_sz = target_sz / sample_scale
target_ul = box_center - (box_sz - 1) / 2
return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])
def get_locator_proposals(self, target_sample_sz):
proposals_xc = self.proposals_xc
proposals_yc = self.proposals_yc
num_proposals_locator = self.params.num_proposals_locator
proposals = torch.zeros(num_proposals_locator, 4, device=self.params.device)
proposals[:,0] = proposals_xc - (target_sample_sz[1]*0.5).item()
proposals[:,1] = proposals_yc - (target_sample_sz[0]*0.5).item()
proposals[:,2] = target_sample_sz[1].item()
proposals[:,3] = target_sample_sz[0].item()
return proposals.unsqueeze(0)
def train_locator_model(self, locator_features, model=None):
regularization = self.params.regularization
if self.regularization_matrix is None:
self.regularization_matrix = regularization*torch.eye(locator_features.shape[1], device=self.params.device)
train_XTX = torch.mm(locator_features.t(), locator_features)
train_XTX = train_XTX + self.regularization_matrix
train_XTY = torch.mm(locator_features.t(), self.labels)
if model is None:
model = torch.potrs(train_XTY, torch.potrf(train_XTX))
else:
for _ in range(30):
model, _ = torch.trtrs(train_XTY - torch.mm(torch.triu(train_XTX, diagonal=1), model), torch.tril(train_XTX, diagonal=0), upper=False)
return model
def refine_target_box(self, target_pos, target_sz, sample_pos, sample_scale):
# Initial box for refinement
init_box = self.get_iounet_box(target_pos, target_sz, sample_pos, sample_scale)
# Extract features from the relevant scale
iou_features = self.params.model.iounet_features
iou_features = TensorList([x[0:1,...] for x in iou_features])
init_boxes = init_box.view(1,4).clone()
if self.params.num_init_random_boxes > 0:
# Get random initial boxes
square_box_sz = init_box[2:].prod().sqrt()
rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])
minimal_edge_size = init_box[2:].min()/3
rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor
new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)
new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]
init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)
init_boxes = torch.cat([init_box.view(1,4), init_boxes])
# Refine boxes by maximizing iou
output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)
# Remove weird boxes with extreme aspect ratios
output_boxes[:, 2:].clamp_(1)
aspect_ratio = output_boxes[:,2] / output_boxes[:,3]
keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)
output_boxes = output_boxes[keep_ind,:]
output_iou = output_iou[keep_ind]
# If no box found
if output_boxes.shape[0] == 0:
return None, None
# Take average of top k boxes
k = getattr(self.params, 'iounet_k', 5)
topk = min(k, output_boxes.shape[0])
_, inds = torch.topk(output_iou, topk)
predicted_box = output_boxes[inds, :].mean(0).cpu()
predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0).cpu()
# Update position
new_pos = predicted_box[:2] + predicted_box[2:]/2 - (self.img_sample_sz - 1) / 2
new_pos = new_pos.flip((0,)) * sample_scale + sample_pos
# Linear interpolation to update the target size
new_target_sz = predicted_box[2:].flip((0,)) * sample_scale
new_target_sz = self.params.scale_damp * self.target_sz + (1 - self.params.scale_damp) * new_target_sz
return new_pos, new_target_sz
def optimize_boxes(self, iou_features, init_boxes):
# Optimize iounet boxes
output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)
step_length = self.params.box_refinement_step_length
for i_ in range(self.params.box_refinement_iter):
# forward pass
bb_init = output_boxes.clone().detach()
bb_init.requires_grad = True
outputs = self.iou_predictor.predict_iou(self.target_iou_feat, iou_features, bb_init)
if isinstance(outputs, (list, tuple)):
outputs = outputs[0]
outputs.backward(gradient = torch.ones_like(outputs))
# Update proposal
output_boxes = bb_init + step_length * (bb_init.grad*100).round()/100 * bb_init[:, :, 2:].repeat(1, 1, 2)
output_boxes.detach_()
step_length *= self.params.box_refinement_step_decay
return output_boxes.view(-1,4), outputs.detach().view(-1)
    def output_result_image(self, image, state):
        if self.params.output_image:
            output_dir = self.params.output_image_path
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
            cv2.rectangle(image, (int(state[0]),int(state[1])),(int(state[0]+state[2]),int(state[1]+state[3])), (255,0,0), 3)
            cv2.imwrite(os.path.join(output_dir,'{}.jpg'.format(self.frame_num)), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
'''
import cv2
for i in range(train_samples.shape[0]):
output_dir = '/home/zhenglinyu2/SBDT_tracking/debug/transform_image/'
count = len(os.listdir(output_dir))
transform_image = train_samples[i,...].permute(1,2,0)
transform_image = transform_image.data.numpy()
cv2.imwrite(os.path.join(output_dir,'{}.jpg'.format(count+1)),transform_image))
'''
'''
torch.cuda.synchronize()
start = time.time()
torch.cuda.synchronize()
print(time.time() - start)
'''
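# --- Hedged example (not part of the original tracker) ----------------------
# A minimal sketch of the closed-form ridge regression that train_locator_model()
# solves: w = (X^T X + reg*I)^{-1} X^T y. The explicit inverse below is only for
# readability; the tracker factorizes the same system with Cholesky calls
# (torch.potrf/torch.potrs above, torch.linalg.cholesky/torch.cholesky_solve in
# newer PyTorch). All shapes here are made up.
if __name__ == '__main__':
    X = torch.randn(128, 32)                  # hypothetical locator features
    y = torch.randn(128, 1)                   # hypothetical regression labels
    reg = 0.1
    A = X.t().mm(X) + reg * torch.eye(32)     # regularized normal equations
    b = X.t().mm(y)
    w = torch.inverse(A).mm(b)                # ridge regression weights
    print(w.shape)                            # -> torch.Size([32, 1])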
|
py | b40def1ea838b445bc965b674d92a5ad1ffcbb3f | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import sys
from kfp.components import executor as component_executor
from kfp.components import kfp_config
from kfp.components import utils
def _setup_logging():
logging_format = '[KFP Executor %(asctime)s %(levelname)s]: %(message)s'
logging.basicConfig(
stream=sys.stdout, format=logging_format, level=logging.INFO)
def executor_main():
_setup_logging()
parser = argparse.ArgumentParser(description='KFP Component Executor.')
parser.add_argument(
'--component_module_path',
type=str,
help='Path to a module containing the KFP component.')
parser.add_argument(
'--function_to_execute',
type=str,
required=True,
help='The name of the component function in '
'--component_module_path file that is to be executed.')
parser.add_argument(
'--executor_input',
type=str,
help='JSON-serialized ExecutorInput from the orchestrator. '
'This should contain inputs and placeholders for outputs.')
args, _ = parser.parse_known_args()
func_name = args.function_to_execute
module_path = None
module_directory = None
module_name = None
if args.component_module_path is not None:
logging.info(
'Looking for component `{}` in --component_module_path `{}`'.format(
func_name, args.component_module_path))
module_path = args.component_module_path
module_directory = os.path.dirname(args.component_module_path)
module_name = os.path.basename(args.component_module_path)[:-len('.py')]
else:
# Look for module directory using kfp_config.ini
logging.info('--component_module_path is not specified. Looking for'
' component `{}` in config file `kfp_config.ini`'
' instead'.format(func_name))
config = kfp_config.KFPConfig()
components = config.get_components()
if not components:
raise RuntimeError('No components found in `kfp_config.ini`')
try:
module_path = components[func_name]
except KeyError:
raise RuntimeError(
'Could not find component `{}` in `kfp_config.ini`. Found the '
' following components instead:\n{}'.format(
func_name, components))
module_directory = str(module_path.parent)
module_name = str(module_path.name)[:-len('.py')]
logging.info(
'Loading KFP component "{}" from {} (directory "{}" and module name'
' "{}")'.format(func_name, module_path, module_directory, module_name))
module = utils.load_module(
module_name=module_name, module_directory=module_directory)
executor_input = json.loads(args.executor_input)
function_to_execute = getattr(module, func_name)
logging.info('Got executor_input:\n{}'.format(
json.dumps(executor_input, indent=4)))
executor = component_executor.Executor(
executor_input=executor_input, function_to_execute=function_to_execute)
executor.execute()
if __name__ == '__main__':
executor_main()
|
py | b40def282cbefc189ce45e69717b46e08ce7b523 | import os
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, random_split
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
import datasets
@datasets.register('mnist-ds')
class mnist_wrapper(Dataset):
def __init__(self, mnist=None, train=True, root_dir=None):
if mnist is not None:
self._dataset = mnist
else:
self.root_dir = root_dir
self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
self._dataset = torchvision.datasets.MNIST(self.root_dir, train=train, download=True, transform=self.transform)
def __len__(self):
return len(self._dataset)
def __getitem__(self, idx):
img, label = self._dataset[idx]
return {
'img': img,
'label': label,
}
@datasets.register('mnist-dm')
class MNISTDataModule(pl.LightningDataModule):
def __init__(self, root_dir, batch_size):
super().__init__()
self.root_dir = root_dir
self.batch_size = batch_size
self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
def setup(self, stage=None):
if stage == "fit" or stage is None:
mnist_full = torchvision.datasets.MNIST(self.root_dir, train=True, transform=self.transform)
self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
if stage == "test" or stage is None:
self.mnist_test = torchvision.datasets.MNIST(self.root_dir, train=False, transform=self.transform)
def prepare_data(self):
# download
torchvision.datasets.MNIST(self.root_dir, train=True, download=True)
torchvision.datasets.MNIST(self.root_dir, train=False, download=True)
def general_loader(self, dataset, tag):
return DataLoader(
dataset,
shuffle=(tag=='train'),
num_workers=os.cpu_count(),
batch_size=self.batch_size,
pin_memory=True,
)
def train_dataloader(self):
return self.general_loader(mnist_wrapper(self.mnist_train), 'train')
def val_dataloader(self):
return self.general_loader(mnist_wrapper(self.mnist_val), 'val')
def test_dataloader(self):
return self.general_loader(mnist_wrapper(self.mnist_test), 'test')
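# --- Hedged usage sketch (not part of the original module) ------------------
# How the LightningDataModule above is typically driven; root_dir and batch_size
# are assumptions, and in the real project the class would usually be obtained
# through the `datasets` registry rather than constructed directly.
if __name__ == '__main__':
    dm = MNISTDataModule(root_dir='./data', batch_size=64)
    dm.prepare_data()
    dm.setup('fit')
    batch = next(iter(dm.train_dataloader()))
    print(batch['img'].shape, batch['label'].shape)   # e.g. [64, 1, 28, 28] and [64]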
|
py | b40defafabc210d05df293402d7a863f4d5cefe5 | from typing import List
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
ans = []
n = len(candidates)
visited = set()
candidates.sort()
def dfs(idx: int, cur: int, path: List[int]):
if cur == 0:
ans.append(path[:])
return
elif idx == n:
return
            # If the current number equals the previous one and the previous one is not in the path, skip it
if (
idx != 0
and candidates[idx] == candidates[idx - 1]
and (idx - 1) not in visited
):
dfs(idx + 1, cur, path)
return
            # 1. Include this number
if candidates[idx] <= cur:
path.append(candidates[idx])
visited.add(idx)
                # Recurse on the next index
dfs(idx + 1, cur - candidates[idx], path)
                # Undo the choice (backtrack)
path.pop()
visited.remove(idx)
            # 2. Skip this number
dfs(idx + 1, cur, path)
dfs(0, target, [])
return ans
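# Hedged usage sketch (not part of the original snippet): LeetCode 40's sample input.
if __name__ == '__main__':
    print(Solution().combinationSum2([10, 1, 2, 7, 6, 1, 5], 8))
    # expected combinations, in some order: [1,1,6], [1,2,5], [1,7], [2,6]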
|
py | b40df00d6c45cca40d02f228b61b3cfd73c20f27 | from ._stopping_criterion import StoppingCriterion
from ..accumulate_data import MeanVarData
from ..discrete_distribution import IIDStdUniform
from ..true_measure import Gaussian, BrownianMotion
from ..integrand import Keister, AsianOption
from ..util import MaxSamplesWarning
from numpy import *
from scipy.stats import norm
from time import time
import warnings
class CubMCCLT(StoppingCriterion):
"""
Stopping criterion based on the Central Limit Theorem.
>>> k = Keister(IIDStdUniform(2,seed=7))
>>> sc = CubMCCLT(k,abs_tol=.05)
>>> solution,data = sc.integrate()
>>> solution
1.801...
>>> data
Solution: 1.8010
Keister (Integrand Object)
IIDStdUniform (DiscreteDistribution Object)
d 2^(1)
seed 7
mimics StdUniform
Lebesgue (TrueMeasure Object)
transform Gaussian (TrueMeasure Object)
mean 0
covariance 2^(-1)
decomp_type pca
CubMCCLT (StoppingCriterion Object)
inflate 1.200
alpha 0.010
abs_tol 0.050
rel_tol 0
n_init 2^(10)
n_max 10000000000
MeanVarData (AccumulateData Object)
levels 1
solution 1.801
n 5741
n_total 6765
error_bound 0.051
confid_int [1.75 1.852]
time_integrate ...
>>> ac = AsianOption(IIDStdUniform(),
... multi_level_dimensions = [2,4,8])
>>> sc = CubMCCLT(ac,abs_tol=.05)
>>> solution,data = sc.integrate()
"""
parameters = ['inflate','alpha','abs_tol','rel_tol','n_init','n_max']
def __init__(self, integrand, abs_tol=1e-2, rel_tol=0., n_init=1024., n_max=1e10,
inflate=1.2, alpha=0.01):
"""
Args:
integrand (Integrand): an instance of Integrand
inflate (float): inflation factor when estimating variance
alpha (float): significance level for confidence interval
abs_tol (float): absolute error tolerance
rel_tol (float): relative error tolerance
n_max (int): maximum number of samples
"""
# Set Attributes
self.abs_tol = float(abs_tol)
self.rel_tol = float(rel_tol)
self.n_init = float(n_init)
self.n_max = float(n_max)
self.alpha = float(alpha)
self.inflate = float(inflate)
# QMCPy Objs
self.integrand = integrand
self.true_measure = self.integrand.true_measure
self.discrete_distrib = self.integrand.discrete_distrib
# Verify Compliant Construction
allowed_levels = ['single','fixed-multi']
allowed_distribs = ["IIDStdUniform"]
super(CubMCCLT,self).__init__(allowed_levels, allowed_distribs)
def integrate(self):
""" See abstract method. """
# Construct AccumulateData Object to House Integration data
self.data = MeanVarData(self, self.integrand, self.true_measure, self.discrete_distrib, self.n_init)
t_start = time()
# Pilot Sample
self.data.update_data()
# use cost of function values to decide how to allocate
temp_a = self.data.t_eval ** 0.5
temp_b = (temp_a * self.data.sighat).sum()
# samples for computation of the mean
        # n_mu_temp := n such that the confidence interval width and confidence level will be satisfied
tol_up = max(self.abs_tol, abs(self.data.solution) * self.rel_tol)
z_star = -norm.ppf(self.alpha / 2.)
n_mu_temp = ceil(temp_b * (self.data.sighat / temp_a) * \
(z_star * self.inflate / tol_up)**2)
# n_mu := n_mu_temp adjusted for previous n
self.data.n_mu = maximum(self.data.n, n_mu_temp)
self.data.n += self.data.n_mu.astype(int)
if self.data.n_total + self.data.n.sum() > self.n_max:
# cannot generate this many new samples
warning_s = """
Alread generated %d samples.
Trying to generate %d new samples, which would exceed n_max = %d.
The number of new samples will be decrease proportionally for each integrand.
Note that error tolerances may no longer be satisfied""" \
% (int(self.data.n_total), int(self.data.n.sum()), int(self.n_max))
warnings.warn(warning_s, MaxSamplesWarning)
            # decrease n proportionally for each integrand
            n_decrease = self.data.n_total + self.data.n.sum() - self.n_max
            dec_prop = n_decrease / self.data.n.sum()
self.data.n = floor(self.data.n - self.data.n * dec_prop)
# Final Sample
self.data.update_data()
# CLT confidence interval
sigma_up = (self.data.sighat ** 2 / self.data.n_mu).sum(0) ** 0.5
self.data.error_bound = z_star * self.inflate * sigma_up
self.data.confid_int = self.data.solution + self.data.error_bound * array([-1, 1])
self.data.time_integrate = time() - t_start
return self.data.solution, self.data
def set_tolerance(self, abs_tol=None, rel_tol=None):
"""
See abstract method.
Args:
abs_tol (float): absolute tolerance. Reset if supplied, ignored if not.
rel_tol (float): relative tolerance. Reset if supplied, ignored if not.
"""
if abs_tol != None: self.abs_tol = abs_tol
if rel_tol != None: self.rel_tol = rel_tol |
py | b40df0303f8abde0680ff7f5cccb14e409be1bf3 | import subprocess
subprocess.Popen(['python','hear_auto.py'])
subprocess.Popen(['python','hear_auto.py'])
subprocess.Popen(['python','hear_auto.py'])
#subprocess.Popen(['python','hear_auto.py'])
#subprocess.Popen(['python','hear_auto.py'])
|
py | b40df10c24a07a3011201cfc12c6461638414772 | import os
import sys
import tempfile
import requests
from pymystem3.mystem import _get_tarball_url
def install(path):
"""
Install mystem binary as :py:const:`~pymystem3.constants.MYSTEM_BIN`.
Overwrite if already installed.
"""
url = _get_tarball_url()
print("Installing mystem to %s from %s" % (path, url), file=sys.stdout)
if not os.path.isdir(path):
os.makedirs(path)
tmp_fd, tmp_path = tempfile.mkstemp()
try:
r = requests.get(url, stream=True)
with os.fdopen(tmp_fd, 'wb') as fd:
for chunk in r.iter_content(64 * 1024):
fd.write(chunk)
fd.flush()
if url.endswith('.tar.gz'):
import tarfile
tar = tarfile.open(tmp_path)
try:
tar.extract('mystem', path)
finally:
tar.close()
elif url.endswith('.zip'):
import zipfile
zip = zipfile.ZipFile(tmp_path)
try:
zip.extractall(path)
finally:
zip.close()
else:
raise NotImplementedError("Could not install mystem from %s" % url)
finally:
os.unlink(tmp_path) |
py | b40df1837192c6e31ff3461c36add7dca4027c9d | import baza
import sqlite3
import random
import hashlib
conn = sqlite3.connect('olimpijske-igre.db')
baza.ustvari_bazo_ce_ne_obstaja(conn)
conn.execute('PRAGMA foreign_keys = ON')
def mozna_leta():
poizvedba = """
SELECT leto
FROM olimpijske_igre
"""
leta = conn.execute(poizvedba)
return [leto for leto, in leta]
def poisci_olimpijske(letoPodano):
"""
    Function that returns the key of the Olympic games held in the given year.
>>> poisci_olimpijske('1948')
11
"""
poizvedba = """
SELECT kljuc
FROM olimpijske_igre
WHERE leto = ?
"""
indeks, = conn.execute(poizvedba, [letoPodano]).fetchone()
return indeks
def podatki_olimpijske(kljucPodan):
"""
    Function that returns the start, end, and host city of the Olympics, and the number of countries.
    >>> podatki_olimpijske(11)
[29.7., 14.8., London, 44]
"""
poizvedba = """
SELECT zacetek, konec, mesto, st_drzav
FROM olimpijske_igre
WHERE kljuc = ?
"""
return conn.execute(poizvedba, [kljucPodan]).fetchone()
def poisci_osebe(niz):
"""
    Function that returns the IDs of all persons whose name contains the given string.
>>> poisci_osebe('elia')
[8, 42, 457, 497]
"""
poizvedba = """
SELECT id
FROM osebe
WHERE ime LIKE ?
ORDER BY ime
"""
idji_oseb = []
for (id_osebe,) in conn.execute(poizvedba, ['%' + niz + '%']):
idji_oseb.append(id_osebe)
return idji_oseb
def podatki_oseb(id_oseb):
"""
    Returns basic data for all persons with the given IDs.
>>> podatki_oseb([8, 42, 457, 497])
[(8, 'Belia Verduin'), (42, 'Othelia Scullion'), (457, 'Delia Louden'), (497, 'Rafaelia Lambot')]
"""
poizvedba = """
SELECT id, ime, priimek
FROM osebe
WHERE id IN ({})
""".format(','.join('?' for _ in range(len(id_oseb))))
return conn.execute(poizvedba, id_oseb).fetchall()
def podatki_osebe(id_osebe):
"""
    Returns data about the person with the given ID.
>>> podatki_osebe(8)
('Belia Verduin', )
"""
poizvedba = """
SELECT ime,priimek FROM osebe WHERE id = ?
"""
cur = conn.cursor()
cur.execute(poizvedba, [id_osebe])
osnovni_podatki = cur.fetchone()
if osnovni_podatki is None:
return None
else:
ime,priimek = osnovni_podatki
poizvedba_za_podatke = """
SELECT sporti.sport, discipline.disciplina, uvrstitve.mesto
FROM uvrstitve
JOIN osebe ON uvrstitve.id_osebe = osebe.id
JOIN discipline ON discipline.id = uvrstitve.id_disciplina
JOIN sporti ON sporti.kljuc = discipline.id_sport
WHERE osebe.id = ?
ORDER BY uvrstitve.mesto
"""
uvrstitve = conn.execute(poizvedba_za_podatke, [id_osebe]).fetchall()
return ime, priimek, uvrstitve
def poisci_discipline(disciplina):
"""
    Function that returns the ID of the discipline.
>>> poisci_discipline('krogla')
[9]
"""
poizvedba = """
SELECT id
FROM discipline
WHERE disciplina = ?
"""
return conn.execute(poizvedba, [disciplina]).fetchone()
def podatki_disciplina(id_disciplina):
"""
    Returns the first-place finishers in the discipline with the given ID.
>>> podatki_disciplina(100m delfin)
(Crissy Keyhoe (1976) )
"""
poizvedba = """
SELECT osebe.ime, osebe.priimek, olimpijske_igre.leto
FROM osebe
JOIN uvrstitve ON osebe.id = uvrstitve.id_osebe
JOIN olimpijske_igre ON olimpijske_igre.kljuc = uvrstitve.kljuc_leto
WHERE id_disciplina = ? AND uvrstitve.mesto == 1
"""
osebe = []
for ime,priimek,leto in conn.execute(poizvedba, [id_disciplina]):
osebe.append((ime,priimek,leto))
return osebe
def dodaj_OI(leto, mesto, zacetek, konec, st_drzav):
with conn:
id = conn.execute("""
INSERT INTO olimpijske_igre (leto, mesto, zacetek, konec, st_drzav)
VALUES (?,?,?,?,?)
""",[leto, mesto, zacetek, konec, st_drzav]).lastrowid
return id
def zakodiraj(geslo, sol=None):
if sol is None:
sol = ''.join(chr(random.randint(65, 122)) for _ in range(16))
posoljeno_geslo = geslo + '$' + sol
zakodirano_geslo = hashlib.sha512(posoljeno_geslo.encode()).hexdigest()
return zakodirano_geslo, sol
def preveri_geslo(uporabnisko_ime, geslo):
poizvedba = """
SELECT geslo, sol FROM uporabniki
WHERE uporabnisko_ime = ?
"""
uporabnik = conn.execute(poizvedba, [uporabnisko_ime]).fetchone()
if uporabnik is None:
return False
shranjeno_geslo, sol = uporabnik
zakodirano_geslo, _ = zakodiraj(geslo, sol)
return shranjeno_geslo == zakodirano_geslo
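# Hedged usage sketch (not part of the original module): hashing the same password with
# the same salt reproduces the stored hash; the password value is made up.
if __name__ == '__main__':
    zakodirano, sol = zakodiraj('geslo123')
    ponovno, _ = zakodiraj('geslo123', sol)
    assert zakodirano == ponovno   # same password + same salt -> same hash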
def ustvari_uporabnika(uporabnisko_ime, geslo):
poizvedba = """
INSERT INTO uporabniki
(uporabnisko_ime, geslo, sol)
VALUES (?, ?, ?)
"""
with conn:
zakodirano_geslo, sol = zakodiraj(geslo)
conn.execute(poizvedba, [uporabnisko_ime, zakodirano_geslo, sol]).fetchone()
return True |
py | b40df29169cd70e3331b51f81736d5d246f4d491 | #!/usr/bin/env python2
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Copyright (c) 2014-2020 The Skicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import SkicoinTestFramework
from test_framework.util import *
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(SkicoinTestFramework):
'''
Test longpolling with getblocktemplate.
'''
def run_test(self):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
wait_to_sync(self.nodes[0])
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
self.nodes[0].generate(1) # generate a block on another node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
py | b40df38571178e5e40d4be41df5c90d0718446a0 | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
import numpy as np
import pickle
from dataset.mnist import load_mnist
from common.functions import sigmoid, softmax
def get_data():
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
return x_test, t_test
def init_network():
with open("sample_weight.pkl", 'rb') as f:
network = pickle.load(f)
return network
def predict(network, x):
"""
784 = 28 * 28
W1: (784, 50)
b1: 50
x ‧ W1 + b1 -> a1
W2: (50, 100)
b2: 100
a1 ‧ W2 + b2 -> a2
W3: (100, 10)
b3: 10
a2 ‧ W3 + b3 -> a3
"""
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
"""
print('784 = 28 * 28')
print('W1: ', len(W1), ',', len(W1[0]))
print('b1: ', len(b1))
print('x ‧ W1 + b1 -> a1')
print('W2: ', len(W2), ',', len(W2[0]))
print('b2: ', len(b2))
print('a1 ‧ W2 + b2 -> a2')
print('W3: ', len(W3), ',', len(W3[0]))
print('b3: ', len(b3))
print('a2 ‧ W3 + b3 -> a3')
"""
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = softmax(a3)
return y
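def predict_batched(network, x, t, batch_size=100):
    """Hedged variant, not in the original script: the same evaluation done in
    mini-batches. predict() is plain matrix arithmetic, so it accepts a 2-D batch,
    and per-row argmax is unaffected by how the softmax normalization broadcasts.
    batch_size is an arbitrary choice."""
    correct = 0
    for i in range(0, len(x), batch_size):
        y_batch = predict(network, x[i:i + batch_size])   # shape (batch, 10)
        p = np.argmax(y_batch, axis=1)                    # most probable class per row
        correct += int(np.sum(p == t[i:i + batch_size]))
    return correct / len(x)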
x, t = get_data()
network = init_network()
accuracy_cnt = 0
for i in range(len(x)):
y = predict(network, x[i])
    p = np.argmax(y)  # get the index of the element with the highest probability
if p == t[i]:
accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
# Result stdout:
# Accuracy:0.9352 |
py | b40df4563c8eb7dbb72ecf06666dc6c5e1fe4642 | from unittest import TestCase, mock, main
from test_trymerge import mocked_gh_graphql
from trymerge import GitHubPR
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from typing import Any
from tryrebase import rebase_onto
class TestRebase(TestCase):
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('gitutils.GitRepo._run_git')
@mock.patch('tryrebase.gh_post_comment')
def test_rebase(self, mocked_post_comment: Any, mocked_run_git: Any, mocked_gql: Any) -> None:
"Tests rebase successfully"
pr = GitHubPR("pytorch", "pytorch", 31093)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
rebase_onto(pr, repo, 'master')
calls = [mock.call('fetch', 'origin', 'pull/31093/head:pull/31093/head'),
mock.call('rebase', 'refs/remotes/origin/master', 'pull/31093/head'),
mock.call('push', '-f', 'https://github.com/mingxiaoh/pytorch.git', 'pull/31093/head:master')]
mocked_run_git.assert_has_calls(calls)
self.assertTrue(
"Successfully rebased `master` onto `refs/remotes/origin/master`" in mocked_post_comment.call_args[0][3])
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('gitutils.GitRepo._run_git')
@mock.patch('tryrebase.gh_post_comment')
def test_rebase_to_stable(self, mocked_post_comment: Any, mocked_run_git: Any, mocked_gql: Any) -> None:
"Tests rebase to viable/strict successfully"
pr = GitHubPR("pytorch", "pytorch", 31093)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
rebase_onto(pr, repo, 'viable/strict', False)
calls = [mock.call('fetch', 'origin', 'pull/31093/head:pull/31093/head'),
mock.call('rebase', 'refs/remotes/origin/viable/strict', 'pull/31093/head'),
mock.call('push', '-f', 'https://github.com/mingxiaoh/pytorch.git', 'pull/31093/head:master')]
mocked_run_git.assert_has_calls(calls)
self.assertTrue(
"Successfully rebased `master` onto `refs/remotes/origin/viable/strict`" in mocked_post_comment.call_args[0][3])
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('gitutils.GitRepo._run_git', return_value="Everything up-to-date")
@mock.patch('tryrebase.gh_post_comment')
def test_no_need_to_rebase(self, mocked_post_comment: Any, mocked_run_git: Any, mocked_gql: Any) -> None:
"Tests branch already up to date"
pr = GitHubPR("pytorch", "pytorch", 31093)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
rebase_onto(pr, repo, 'master')
calls = [mock.call('fetch', 'origin', 'pull/31093/head:pull/31093/head'),
mock.call('rebase', 'refs/remotes/origin/master', 'pull/31093/head'),
mock.call('push', '-f', 'https://github.com/mingxiaoh/pytorch.git', 'pull/31093/head:master')]
mocked_run_git.assert_has_calls(calls)
self.assertTrue(
"Tried to rebase and push PR #31093, but it was already up to date" in mocked_post_comment.call_args[0][3])
if __name__ == "__main__":
main()
|
py | b40df480f6d3ea698cb1017fa48a3c0fc367e991 | # -*- coding: utf-8 -*-
__author__ = 'ooo'
__date__ = '2018/12/15 12:17'
import torch
import shutil
import os, time
import math
import numpy as np
from datetime import datetime
from collections import OrderedDict
from torchvision import transforms
from collections import namedtuple
import torch.nn as nn
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class TopkAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, k=5):
self.k = k
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.topk = [0]
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.topk = sorted(record_topk_value(self.topk, val, self.k))
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
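# Hedged usage sketch (not part of the original helpers): top-1/top-5 on random logits.
if __name__ == '__main__':
    _logits = torch.randn(8, 10)
    _targets = torch.randint(0, 10, (8,))
    _top1, _top5 = accuracy(_logits, _targets, topk=(1, 5))
    print(_top1.item(), _top5.item())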
def save_checkpoint(state, is_best, filename='checkpoint.pth'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth')
def adjust_learning_rate_org(optimizer, epoch, lr_start=0.01, decay_rate=0.1, decay_time=30):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr_start * (decay_rate ** (epoch // decay_time))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_learning_rate(optimizer, epoch, cfg=None):
"""
Policy1: 'regular':
lr will decay *rate from lr_start, for every n epoch, when the epoch > start
lr_start = 0.01
decay_policy = 'regular'
decay_rate = 0.1
decay_time = n
decay_start = start
Policy2: 'appoint':
lr will decay *rate from lr_start, for the epoch appointed in [(rate1, ep1), (rate2, ep2), ...]
"""
lr_start, lr_end, decay_policy, decay_rate, decay_time, decay_start, decay_appoint = \
cfg.lr_start, cfg.lr_end, cfg.lr_decay_policy, cfg.lr_decay_rate, cfg.lr_decay_time, cfg.lr_decay_start, cfg.lr_decay_appoint
current_lr = optimizer.param_groups[0]['lr']
if decay_policy == 'regular':
if epoch >= decay_start:
current_lr = lr_start * (decay_rate ** ((epoch - decay_start) // decay_time + 1))
if current_lr <= lr_end:
current_lr = lr_end
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
return current_lr
elif decay_policy == 'appoint':
for ep, rate in decay_appoint:
if epoch == ep:
current_lr = current_lr * rate
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
return current_lr
elif decay_policy == 'original':
start_epoch = 0 if cfg.start_epoch == 0 else 1
current_lr = lr_start * (decay_rate ** ((epoch - start_epoch) // decay_time))
if current_lr <= lr_end:
current_lr = lr_end
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
return current_lr
else:
raise NotImplementedError
def adjust_batch_size(current_bsize, epoch, cfg):
if cfg.bs_decay_policy == 'frozen':
return current_bsize
if cfg.bs_decay_policy == 'appoint':
for ep, rate in cfg.bs_decay_appoint:
if epoch == ep:
current_bsize = current_bsize * rate
return current_bsize
if cfg.bs_decay_policy == 'regular':
if epoch >= cfg.bs_decay_start:
if current_bsize <= cfg.bsize_end:
current_bsize = cfg.bsize_end
else:
decay_rate = cfg.bs_decay_rate ** ((epoch - cfg.bs_decay_start) // cfg.bs_decay_interval + 1)
current_bsize = cfg.bsize_start * decay_rate
return current_bsize
def resume_from_ckpt(model, optimizer, resume):
if os.path.isfile(resume):
print("\nloading checkpoint file from %s ..." % (resume,))
checkpoint = torch.load(f=resume)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
best_prec1 = checkpoint['best_prec1']
print('loaded done at epoch {} ...\n'.format(start_epoch))
return start_epoch, best_prec1
else:
raise FileNotFoundError('\ncan not find the ckpt file @ %s ...' % resume)
def model_from_ckpt(model, ckpt):
if os.path.isfile(ckpt):
print("\nloading checkpoint file from %s ..." % (ckpt,))
checkpoint = torch.load(f=ckpt)
try:
model.load_state_dict(checkpoint['state_dict'])
except KeyError:
model.load_state_dict(checkpoint['model'])
except:
raise KeyError('check model KEY name in ckpt file.')
return model
else:
raise FileNotFoundError('check ckpt file exist!')
def calculate_params_scale(model, format=''):
if isinstance(model, str) and model.endswith('.ckpt'):
checkpoint = torch.load(model)
try:
model = checkpoint['state_dict']
except KeyError:
model = checkpoint['model']
except:
raise KeyError('Please check the model KEY in ckpt!')
scale = 0
if isinstance(model, torch.nn.Module):
# method 1
scale = sum([param.nelement() for param in model.parameters()])
# model_parameters = filter(lambda p: p.requires_grad, model.parameters())
# scale = sum([np.prod(p.size()) for p in model_parameters])
elif isinstance(model, OrderedDict):
# method 3
for key, val in model.items():
if not isinstance(val, torch.Tensor):
continue
scale += val.numel()
    if format == 'million':  # millions
scale /= 1000000
print("\n*** Number of params: " + str(scale) + '\tmillion...\n')
return scale
else:
print("\n*** Number of params: " + str(scale) + '\t...')
return scale
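# Hedged usage sketch (assumed toy model, not part of the original helpers).
if __name__ == '__main__':
    _fc = torch.nn.Linear(128, 10)                 # 128*10 + 10 = 1290 parameters
    calculate_params_scale(_fc, format='million')  # prints roughly 0.00129 million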
def calculate_FLOPs_scale(model, input_size, multiply_adds=False, use_gpu=False):
"""
forked from FishNet @ github
https://www.zhihu.com/question/65305385/answer/256845252
https://blog.csdn.net/u011501388/article/details/81061024
https://blog.csdn.net/xidaoliang/article/details/88191910
no bias: K^2 * IO * HW
multiply_adds : False in FishNet Paper, but True in DenseNet paper
"""
assert isinstance(model, torch.nn.Module)
USE_GPU = use_gpu and torch.cuda.is_available()
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (
2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv.append(flops)
def deconv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (
2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_deconv.append(flops)
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
def bn_hook(self, input, output):
list_bn.append(input[0].nelement())
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d):
net.register_forward_hook(conv_hook)
if isinstance(net, torch.nn.ConvTranspose2d):
net.register_forward_hook(deconv_hook)
if isinstance(net, torch.nn.Linear):
net.register_forward_hook(linear_hook)
if isinstance(net, torch.nn.BatchNorm2d):
net.register_forward_hook(bn_hook)
if isinstance(net, torch.nn.ReLU):
net.register_forward_hook(relu_hook)
if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
net.register_forward_hook(pooling_hook)
return
for c in childrens:
foo(c)
multiply_adds = multiply_adds
list_conv, list_deconv, list_bn, list_relu, list_linear, list_pooling = [], [], [], [], [], []
foo(model)
input = torch.rand(2, 3, input_size, input_size)
if USE_GPU:
input = input.cuda()
model = model.cuda()
_ = model(input)
total_flops = (sum(list_conv) + sum(list_deconv) + sum(list_linear)
+ sum(list_bn) + sum(list_relu) + sum(list_pooling))
print(' + Number of FLOPs: %.5fG' % (total_flops / 1e9 / 2))
def calculate_layers_num(model, layers=('conv2d', 'classifier')):
assert isinstance(model, torch.nn.Module)
type_dict = {'conv2d': torch.nn.Conv2d,
'bnorm2d': torch.nn.BatchNorm2d,
'relu': torch.nn.ReLU,
'fc': torch.nn.Linear,
'classifier': torch.nn.Linear,
'linear': torch.nn.Linear,
'deconv2d': torch.nn.ConvTranspose2d}
nums_list = []
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, type_dict[layer]):
pass
return 1
count = 0
for c in childrens:
count += foo(c)
return count
def foo2(net, layer):
count = 0
for n, m in net.named_modules():
if isinstance(m, type_dict[layer]):
count += 1
return count
for layer in layers:
# nums_list.append(foo(model))
nums_list.append(foo2(model, layer))
total = sum(nums_list)
strtip = ''
for layer, nums in zip(list(layers), nums_list):
strtip += ', %s: %s' % (layer, nums)
print('\n*** Number of layers: %s %s ...\n' % (total, strtip))
return total
def calculate_time_cost(model, insize=32, toc=1, use_gpu=False, pritout=False):
if not use_gpu:
x = torch.randn(4, 3, insize, insize)
tic, toc = time.time(), toc
y = [model(x) for _ in range(toc)][0]
toc = (time.time() - tic) / toc
        print('processing time: %.5f s\t' % toc)
        if not isinstance(y, (list, tuple)):
            y = [y]
        if pritout:
            print('predicted outputs: %s xfc.' % len(y), [yy.max(1) for yy in y])
return y
else:
assert torch.cuda.is_available()
x = torch.randn(4, 3, insize, insize)
model, x = model.cuda(), x.cuda()
tic, toc = time.time(), toc
y = [model(x) for _ in range(toc)][0]
toc = (time.time() - tic) / toc
        print('processing time: %.5f s\t' % toc)
        if not isinstance(y, (list, tuple)):
            y = [y]
        if pritout:
            print('predicted outputs: %s xfc.' % len(y), [yy.max(1) for yy in y])
return y
def get_model_summary(model, insize=224, item_length=26, verbose=False):
"""
forked from HRNet-cls
"""
summary = []
input_tensors = torch.rand((1, 3, insize, insize))
ModuleDetails = namedtuple("Layer", ["name", "input_size", "output_size", "num_parameters", "multiply_adds"])
hooks = []
layer_instances = {}
def add_hooks(module):
def hook(module, input, output):
class_name = str(module.__class__.__name__)
instance_index = 1
if class_name not in layer_instances:
layer_instances[class_name] = instance_index
else:
instance_index = layer_instances[class_name] + 1
layer_instances[class_name] = instance_index
layer_name = class_name + "_" + str(instance_index)
params = 0
if class_name.find("Conv") != -1 or class_name.find("BatchNorm") != -1 or \
class_name.find("Linear") != -1:
for param_ in module.parameters():
params += param_.view(-1).size(0)
flops = "Not Available"
if class_name.find("Conv") != -1 and hasattr(module, "weight"):
flops = (
torch.prod(
torch.LongTensor(list(module.weight.data.size()))) *
torch.prod(
torch.LongTensor(list(output.size())[2:]))).item()
elif isinstance(module, nn.Linear):
flops = (torch.prod(torch.LongTensor(list(output.size()))) \
* input[0].size(1)).item()
if isinstance(input[0], list):
input = input[0]
if isinstance(output, list):
output = output[0]
summary.append(
ModuleDetails(
name=layer_name,
input_size=list(input[0].size()),
output_size=list(output.size()),
num_parameters=params,
multiply_adds=flops)
)
if not isinstance(module, nn.ModuleList) \
and not isinstance(module, nn.Sequential) \
and module != model:
hooks.append(module.register_forward_hook(hook))
model.eval()
model.apply(add_hooks)
space_len = item_length
model(input_tensors)
for hook in hooks:
hook.remove()
details = ''
if verbose:
details = "Model Summary" + \
os.linesep + \
"Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format(
' ' * (space_len - len("Name")),
' ' * (space_len - len("Input Size")),
' ' * (space_len - len("Output Size")),
' ' * (space_len - len("Parameters")),
' ' * (space_len - len("Multiply Adds (Flops)"))) \
+ os.linesep + '-' * space_len * 5 + os.linesep
params_sum = 0
flops_sum = 0
for layer in summary:
params_sum += layer.num_parameters
if layer.multiply_adds != "Not Available":
flops_sum += layer.multiply_adds
if verbose:
details += "{}{}{}{}{}{}{}{}{}{}".format(
layer.name,
' ' * (space_len - len(layer.name)),
layer.input_size,
' ' * (space_len - len(str(layer.input_size))),
layer.output_size,
' ' * (space_len - len(str(layer.output_size))),
layer.num_parameters,
' ' * (space_len - len(str(layer.num_parameters))),
layer.multiply_adds,
' ' * (space_len - len(str(layer.multiply_adds)))) \
+ os.linesep + '-' * space_len * 5 + os.linesep
details += os.linesep \
+ "Total Parameters: {:,}".format(params_sum) \
+ os.linesep + '-' * space_len * 5 + os.linesep
details += "Total Multiply Adds (For Convolution and Linear Layers only): {:,} GFLOPs".format(
flops_sum / (1024 ** 3)) \
+ os.linesep + '-' * space_len * 5 + os.linesep
details += "Number of Layers" + os.linesep
for layer in layer_instances:
details += "{} : {} layers ".format(layer, layer_instances[layer])
return details
def tensorboard_add_model(model, x, comment=''):
assert isinstance(model, torch.nn.Module)
assert isinstance(x, torch.Tensor)
from tensorboardX import SummaryWriter
current_time = datetime.now().strftime('%b%d_%H:%M-graph--')
log_dir = os.path.join('runs', current_time + model._get_name() + comment)
writer = SummaryWriter(log_dir)
writer.add_graph(model, x)
print('\n*** Model has been add to tensorboardX graph dir: %s ...\n' % (log_dir,))
def find_max_index(dir, sign1='-exp', sign2='.ckpt'):
files = list(os.walk(dir))[0][2]
index = [0]
for f in files:
if sign1 in f and sign2 in f:
f = f.split(sign1)[1].split(sign2)[0]
index.append(int(f))
return max(index)
def find_max_index2(dir, sign1='-exp'):
print('\n*** try to find max exp index in dir: %s ***\n' % dir)
files = list(os.walk(dir))[0][1]
index = [0]
for f in files:
if sign1 in f:
f = f.split(sign1)[1]
index.append(int(f))
return max(index)
def print_size(x, ok=True):
if not ok:
return
if isinstance(x, torch.Tensor):
print(x.size())
elif isinstance(x, (list, tuple)):
for xx in x:
if isinstance(xx, torch.Tensor):
print(xx.size())
def record_topk_value(record, val, k=5):
# record the max topk value
assert isinstance(record, list)
if len(record) < k:
record.append(val)
return record
elif len(record) > k:
record = sorted(record)[::-1]
if min(record) > val:
return record[:k]
else:
record = record[:k]
record = record_topk_value(record, val, k)
return record
else:
min_val = min(record)
if min_val >= val:
return record
else:
record.pop(record.index(min_val))
record.append(val)
return record
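# Hedged usage sketch (not part of the original helpers): keep the 3 largest values seen.
if __name__ == '__main__':
    _rec = [0]
    for _v in [0.3, 0.8, 0.1, 0.9, 0.5]:
        _rec = record_topk_value(_rec, _v, k=3)
    print(sorted(_rec))   # -> [0.5, 0.8, 0.9]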
def plot_log(log_path='./logs/log.txt'):
# forked from https://github.com/prlz77/ResNeXt.pytorch
import re
import matplotlib.pyplot as plt
file = open(log_path, 'r')
accuracy = []
epochs = []
loss = []
for line in file:
test_accuracy = re.search('"test_accuracy": ([0]\.[0-9]+)*', line)
if test_accuracy:
accuracy.append(test_accuracy.group(1))
epoch = re.search('"epoch": ([0-9]+)*', line)
if epoch:
epochs.append(epoch.group(1))
train_loss = re.search('"train_loss": ([0-9]\.[0-9]+)*', line)
if train_loss:
loss.append(train_loss.group(1))
file.close()
plt.figure('test_accuracy vs epochs')
plt.xlabel('epoch')
plt.ylabel('test_accuracy')
plt.plot(epochs, accuracy, 'b*')
plt.plot(epochs, accuracy, 'r')
plt.grid(True)
plt.figure('train_loss vs epochs')
plt.xlabel('epoch')
plt.ylabel('train_loss')
plt.plot(epochs, loss, 'b*')
plt.plot(epochs, loss, 'y')
plt.grid(True)
plt.show()
def tensor_to_img(x, ishow=False, istrans=False, file_name='xxxx', save_dir=''):
maxv, minv, meanv = x.max(), x.min(), x.mean()
x = x[0, 0:3, :, :].squeeze(0)
if istrans:
x = ((x - minv) / (maxv - minv)) * 255
maxv, minv, meanv = x.max(), x.min(), x.mean()
img = transforms.ToPILImage()(x)
if ishow:
img.show()
if save_dir:
file_name = file_name + '_' + str(time.time()).replace('.', '')[0:14] + '.png'
file_path = os.path.join(save_dir, file_name)
img.save(file_path)
print('img has been saved at %s ' % file_path)
def get_pretrained_path(model_dir, arch_name='resnet18'):
"""
    :model_dir: directory that holds the pretrained models; keep all pretrained models together in this folder
    :arch_name: used to find the matching path by model name
    :noraise: do not raise an error when the model is not found
:return: the path of the pretrained model for torch.load(model_path)
"""
if os.path.isfile(model_dir):
return model_dir
elif '.pth' in arch_name or '.tar' in arch_name:
model_path = os.path.join(model_dir, arch_name)
if os.path.isfile(model_path):
return model_path
else:
raise FileNotFoundError('%s' % model_path)
arch_name_list = [
'vgg11-bbd30ac9.pth',
'vgg19-dcbb9e9d.pth',
'resnet18-5c106cde.pth',
'resnet34-333f7ec4.pth',
'resnet50-19c8e357.pth',
'resnet101-5d3b4d8f.pth',
'resnet152-b121ed2d.pth',
'densenet121-a639ec97.pth',
'densenet169-b2777c0a.pth',
'densenet201-c1103571.pth',
'densenet161-8d451a50.pth',
'fishnet99_ckpt.tar',
'fishnet150_ckpt.tar',
'fishnet15x_ckpt_welltrain-==fishnet150.tar',
'mobilev3_y_large-657e7b3d.pth',
'mobilev3_y_small-c7eb32fe.pth',
'mobilev3_x_large.pth.tar',
'mobilev3_x_small.pth.tar',
'mobilev3_d_small.pth.tar',
]
arch = arch_name
arch_name = [name for name in arch_name_list if name.startswith(arch_name)]
if len(arch_name) == 1:
arch_name = arch_name[0]
elif len(arch_name) > 1:
        raise Warning('too many choices for %s ... !' % arch)
    else:
        raise Warning('no checkpoint exists for %s ... !' % arch)
model_path = os.path.join(model_dir, arch_name)
return model_path
def load_ckpt_weights(model, ckptf, device='cpu', mgpus_to_sxpu='none', noload=False, strict=True):
"""
    Convert checkpoints between MultiGpu.ckpt/.model and SingleXpu.model/.ckpt when loading.
    m2s: MultiGpu.ckpt -> SingleXpu.model ; remove prefix 'module.'
    s2m: SingleXpu.ckpt -> MultiGpu.model ; add prefix 'module.'
    none: MultiGpu -> MultiGpu or SingleXpu -> SingleXpu ; load directly without conversion.
    auto: try the three cases above in turn until loading succeeds.
"""
def remove_module_dot(old_state_dict):
# remove the prefix 'module.' of nn.DataParallel
state_dict = OrderedDict()
for k, v in old_state_dict.items():
state_dict[k[7:]] = v
return state_dict
def add_module_dot(old_state_dict):
# add the prefix 'module.' to nn.DataParallel
state_dict = OrderedDict()
for k, v in old_state_dict.items():
state_dict['module.' + k] = v
return state_dict
if isinstance(device, torch.device):
pass
elif device == 'cpu':
device = torch.device(device)
elif device == 'gpu':
device = torch.device('cuda:0')
elif device.startswith('cuda:'):
device = torch.device(device)
else:
raise NotImplementedError
model = model.to(device)
if noload:
return model
print('\n=> loading model.pth from %s ' % ckptf)
    assert os.path.isfile(ckptf), 'ckpt file not found at the given path. %s' % ckptf
assert mgpus_to_sxpu in ['auto', 'm2s', 's2m', 'none']
ckpt = torch.load(f=ckptf, map_location=device)
if 'state_dict' in ckpt.keys():
state_dict = ckpt['state_dict']
elif 'model' in ckpt.keys():
state_dict = ckpt['model']
else:
# ckpt is jus the state_dict.pth!
state_dict = ckpt
if mgpus_to_sxpu == 'auto':
try:
model.load_state_dict(state_dict, strict)
except:
try:
model.load_state_dict(remove_module_dot(state_dict), strict)
except:
try:
model.load_state_dict(add_module_dot(state_dict), strict)
except:
                    print('\n=> Error: keys in the model and keys in the ckpt do not match, '
                          'and not only because of the prefix "module."; e.g. "." cannot appear in a key.\n')
model.load_state_dict(state_dict, strict)
print('\nSuccess: loaded done from %s \n' % ckptf)
return model
elif mgpus_to_sxpu == 'm2s':
state_dict = remove_module_dot(state_dict)
elif mgpus_to_sxpu == 's2m':
state_dict = add_module_dot(state_dict)
elif mgpus_to_sxpu == 'none':
state_dict = state_dict
model.load_state_dict(state_dict, strict)
print('\nSuccess: loaded done from %s \n' % ckptf)
return model
def linear_map(a, b, x):
"""
    Linearly map x onto the interval [a, b].
:return:
"""
assert max(x) != min(x)
assert isinstance(x, np.ndarray)
return (x - min(x)) / (max(x) - min(x)) * (b - a) + a
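# Hedged usage sketch (not part of the original helpers): map an array onto [0, 1].
if __name__ == '__main__':
    print(linear_map(0, 1, np.array([2.0, 4.0, 6.0])))   # -> [0.  0.5 1. ]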
def startwithxyz(it, xyz=()):
# it startwith x or y or z ?
assert isinstance(it, str)
assert isinstance(xyz, (tuple, list))
isok = [it.startswith(x) for x in xyz]
return bool(sum(isok))
class Curves(object):
"""
为 weight-decay 提供取值曲线
"""
def __init__(self, ep=None):
self.ep = ep
super(Curves, self).__init__()
def func1(self, x):
if self.ep is None:
self.ep = 8
return round(x, self.ep)
def func2(self, x):
if self.ep is None:
self.ep = 3
return x ** self.ep
def func3(self, x):
if self.ep is None:
self.ep = 3
return x ** (1 / self.ep)
def func4(self, x):
return math.exp(x)
def func5(self, x):
return math.exp(-x)
def func6(self, x):
return 1 - math.exp(x)
def GCU(m, n):
    # Euclidean algorithm (repeated division) for the greatest common divisor
# https://www.cnblogs.com/todayisafineday/p/6115852.html
if not n:
return m
else:
return GCU(n, m % n)
def get_xfc_which(it, xfc_which):
"""
    - it: current iteration number
- xfc_which: {0 * BN: -1, 20 * BN: -2, 30 * BN: -3}
"""
if isinstance(xfc_which, int):
return xfc_which
elif isinstance(xfc_which, str):
return xfc_which
elif isinstance(xfc_which, dict):
which = None
for ite in sorted(xfc_which.keys())[::-1]:
if it >= ite:
which = xfc_which[ite]
break
if which is None:
raise NotImplementedError
return which
else:
raise NotImplementedError
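# Hedged usage sketch (not part of the original helpers): pick which xfc output to use
# at the current iteration; the schedule below assumes BN (iterations per epoch) == 1.
if __name__ == '__main__':
    _schedule = {0: -1, 20: -2, 30: -3}
    print(get_xfc_which(10, _schedule), get_xfc_which(25, _schedule), get_xfc_which(40, _schedule))
    # -> -1 -2 -3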
# Per-device path configuration; after switching devices, just update the settings here,
# including the pretrained-model path, the data paths, and the current experiment paths.
def get_current_device(device=0):
if isinstance(device, int):
device_list = ['1080Ti', 'titan', 'mic251', 'dellcpu', 'new-device']
device = device_list[device]
elif isinstance(device, str):
device = device
else:
raise NotImplementedError
return device
def get_pretrained_models():
device = get_current_device()
model_dir = {'1080Ti': '/datah/zhangjp/PreTrainedModels',
'titan': '/data/zhangjp/PreTrainedModels',
'mic251': '/DATA/251/jpzhang/Projects/PreTrainedModels',
'dellcpu': 'E://PreTrainedModels',
'new-device': ''}
model_dir = model_dir[device]
return model_dir
def get_data_root(data='imagenet || cifar10 || cifar100 || svhn || ***'):
device = get_current_device()
class Dataset(object):
imagenet = {
'1080Ti': ['/ImageNet2012/', '/data0/ImageNet_ILSVRC2012'][0],
'titan': '/data/dataset/ImageNet2012',
'mic251': '/data1/jpzhang/datasets/imagenet/',
'new-device': '',
}
cifar10 = {
'1080Ti': '/data0/cifar10/',
'titan': '/data/dataset/cifar-10-batches-py/',
'mic251': '/data1/jpzhang/datasets/cifar10/',
'new-device': '',
}
cifar100 = {
'1080Ti': '/data0/cifar100/',
'titan': '/data/dataset/cifar-100-python/',
'mic251': '/data1/jpzhang/datasets/cifar100/',
'new-device': '',
}
svhn = {
'1080Ti': '',
'titan': '',
'mic251': '',
'new-device': '',
}
data_root = getattr(Dataset(), data.lower())[device]
return data_root
def get_base_dir(k='ckpt || log'):
device = get_current_device()
assert k in ['ckpt', 'log']
ckpt_base_dir = {'local': '.',
'1080Ti': '/data1/zhangjp/classify/checkpoints',
'titan': '/backup/zhangjp/classify/checkpoints',
'mic251': '/DATA/251/jpzhang/Projects/checkpoints',
'new-device': ''}
log_base_dir = {'local': '.',
'1080Ti': '/data1/zhangjp/classify/runs',
'titan': '/backup/zhangjp/classify/runs',
'mic251': '/DATA/251/jpzhang/Projects/runs',
'new-device': ''}
if k == 'ckpt':
return ckpt_base_dir[device]
else:
return log_base_dir[device]
if __name__ == '__main__':
import torchvision as tv
from xmodels.scalenet import ScaleNet
imgnet = {'stages': 3, 'depth': 22, 'branch': 3, 'rock': 'U', 'kldloss': False,
'layers': (3, 3, 3), 'blocks': ('D', 'D', 'S'), 'slink': ('A', 'A', 'A'),
'growth': (0, 0, 0), 'classify': (0, 0, 0), 'expand': (1 * 22, 2 * 22),
'dfunc': ('O', 'O'), 'dstyle': 'maxpool', 'fcboost': 'none', 'nclass': 1000,
'last_branch': 1, 'last_down': False, 'last_dfuc': 'D', 'last_expand': 32,
'summer': 'split', 'afisok': False, 'version': 2}
model = tv.models.resnet18()
x = torch.Tensor(1, 3, 224, 224)
print(model._modules.keys())
v = model.layer1
pred = model(x)
print(pred.max(1))
model = ScaleNet(**imgnet)
x = torch.Tensor(1, 3, 256, 256)
pred = model(x)
pred = pred[0]
print(pred.max(1))
print(model)
|