"""
The default bootpeg grammar
"""
from typing import Union
from functools import singledispatch
from ..apegs.boot import (
Value,
Range,
Any,
Empty,
Sequence,
Choice,
Repeat,
And,
Not,
Entail,
Capture,
Transform,
Reference,
Rule,
Clause,
Parser,
Grammar,
bpeg_parser,
)
from ..api import bootpeg_actions, import_parser
precedence = {
clause: prec
for prec, clauses in enumerate(
(
[Value, Range, Any, Empty, Reference],
[Not, And],
[Repeat, Capture],
[Sequence, Entail],
[Choice, Transform],
)
)
for clause in clauses
}
def _wrapped(clause: Clause, parent: Clause) -> str:
literal = unparse(clause)
if literal[0] == "[" or precedence[type(parent)] >= precedence[type(clause)]:
return literal
else:
return f"({unparse(clause)})"
@singledispatch
def unparse(clause: Union[Clause, Parser, Grammar]) -> str:
"""Format a ``clause`` according to bootpeg standard grammar"""
raise NotImplementedError(f"Cannot unparse {clause!r} as bpeg")
@unparse.register(Parser)
@unparse.register(Grammar)
def unparse_grammar(clause: Union[Parser, Grammar]) -> str:
return "\n\n".join(unparse(rule) for rule in clause.rules)
@unparse.register(Value)
def unparse_literal(clause: Value) -> str:
if clause.value == "\n":
return r"\n"
if '"' in clause.value:
if "'" in clause.value:
return '"' + clause.value.replace("'", r"\'") + '"'
return f"'{clause.value}'"
return f'"{clause.value}"'
@unparse.register(Range)
def unparse_range(clause: Range) -> str:
return f"{clause.lower!r}-{clause.upper!r}"
@unparse.register(Empty)
def unparse_empty(clause: Empty) -> str:
return '""'
@unparse.register(Any)
def unparse_any(clause: Any) -> str:
return "." * clause.length
@unparse.register(Reference)
def unparse_reference(clause: Reference) -> str:
return clause.name
@unparse.register(Sequence)
def unparse_sequence(clause: Sequence) -> str:
return " ".join(_wrapped(sub_clause, clause) for sub_clause in clause.sub_clauses)
@unparse.register(Entail)
def unparse_entail(clause: Entail) -> str:
return "~ " + " ".join(
_wrapped(sub_clause, clause) for sub_clause in clause.sub_clauses
)
@unparse.register(Choice)
def unparse_choice(clause: Choice) -> str:
if isinstance(clause.sub_clauses[-1], Empty):
return (
"["
+ " | ".join(
_wrapped(sub_clause, clause) for sub_clause in clause.sub_clauses[:-1]
)
+ "]"
)
return " | ".join(_wrapped(sub_clause, clause) for sub_clause in clause.sub_clauses)
@unparse.register(Repeat)
def unparse_repeat(clause: Repeat) -> str:
return _wrapped(clause.sub_clause, clause) + "+"
@unparse.register(Not)
def unparse_not(clause: Not) -> str:
return "!" + _wrapped(clause.sub_clause, clause)
@unparse.register(And)
def unparse_and(clause: And) -> str:
return "&" + _wrapped(clause.sub_clause, clause)
@unparse.register(Capture)
def unparse_capture(clause: Capture) -> str:
var = "*" if clause.variadic else ""
return f"{var}{clause.name}={_wrapped(clause.sub_clause, clause)}"
@unparse.register(Transform)
def unparse_transform(clause: Transform) -> str:
return f"{unparse(clause.sub_clause)} {{{clause.action}}}"
@unparse.register(Rule)
def unparse_rule(clause: Rule) -> str:
sub_clause = clause.sub_clause
if isinstance(sub_clause, Choice):
body = "\n".join(f" | {unparse(case)}" for case in sub_clause.sub_clauses)
else:
body = f" | {unparse(sub_clause)}"
return f"{clause.name}:\n{body}"
parse: Parser[str, Grammar] = import_parser(
__name__,
dialect=bpeg_parser,
actions=bootpeg_actions,
)
|
from ctypes import c_float
from math import ceil
from pathlib import Path
from pybgfx import bgfx
from pybgfx.utils import as_void_ptr
from pybgfx.utils.shaders_utils import ShaderType, load_shader
from natrix.core.common.constants import TemplateConstants
from natrix.core.fluid_simulator import FluidSimulator
from natrix.core.utils.shaders_utils import create_buffer
root_path = Path(__file__).parent / "shaders"
class SmoothParticlesArea:
PARTICLES_IN = 0
PARTICLES_OUT = 1
_width = 512
_height = 512
_particles_buffer = None
_speed = 500.0
_dissipation = 1.0
simulate = True
def __init__(
self,
width: int,
height: int,
fluid_simulation: FluidSimulator,
vertex_layout: bgfx.VertexLayout,
):
self.fluid_simulation = fluid_simulation
self.vertex_layout = vertex_layout
self._width = width
self._height = height
self._create_uniforms()
self._load_compute_kernels()
self._set_size()
self._create_buffers()
self._init_compute_kernels()
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, value):
if value > 0:
self._speed = value
else:
raise ValueError("'Speed' should be greater than zero")
@property
def dissipation(self):
return self._dissipation
@dissipation.setter
def dissipation(self, value):
if value > 0:
self._dissipation = value
else:
raise ValueError("'Dissipation' should be grater than zero")
def add_particles(self, position: tuple, radius: float, strength: float):
if self.simulate:
self._init_compute_kernels()
bgfx.setUniform(
self.position_uniform,
as_void_ptr((c_float * 2)(position[0], position[1])),
)
bgfx.setUniform(self.value_uniform, as_void_ptr((c_float * 1)(strength)))
bgfx.setUniform(self.radius_uniform, as_void_ptr((c_float * 1)(radius)))
bgfx.dispatch(
                0, self._add_particles_kernel, self._num_groups_x, self._num_groups_y, 1
)
self._flip_buffer()
def update(self, time_delta: float):
self._init_compute_kernels()
if self.simulate:
bgfx.setUniform(
self.dissipation_uniform, as_void_ptr((c_float * 1)(self.dissipation))
)
bgfx.setUniform(
self.elapsed_time_uniform, as_void_ptr((c_float * 1)(time_delta))
)
bgfx.setUniform(self.speed_uniform, as_void_ptr((c_float * 1)(self.speed)))
bgfx.dispatch(
0,
self._advect_particles_kernel,
self._num_groups_x,
self._num_groups_y,
1,
)
self._flip_buffer()
def _set_size(self):
self._particle_size = (self._width, self._height)
self._velocity_size = (
self.fluid_simulation.width,
self.fluid_simulation.height,
)
group_size_x = TemplateConstants.NUM_THREADS.value
group_size_y = TemplateConstants.NUM_THREADS.value
self._num_cells = self._width * self._height
self._num_groups_x = int(ceil(float(self._width) / float(group_size_x)))
self._num_groups_y = int(ceil(float(self._height) / float(group_size_y)))
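        # Note (added): the group counts above are the usual ceil-division for a
        # compute dispatch, e.g. with the default 512x512 particle grid and
        # TemplateConstants.NUM_THREADS == 8 (an assumed value) each axis needs
        # ceil(512 / 8) == 64 work groups.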
def _create_uniforms(self):
self.particle_size_uniform = bgfx.createUniform(
"_ParticleSize", bgfx.UniformType.Vec4
)
self.position_uniform = bgfx.createUniform("_Position", bgfx.UniformType.Vec4)
self.value_uniform = bgfx.createUniform("_Value", bgfx.UniformType.Vec4)
self.radius_uniform = bgfx.createUniform("_Radius", bgfx.UniformType.Vec4)
self.dissipation_uniform = bgfx.createUniform(
"_Dissipation", bgfx.UniformType.Vec4
)
self.elapsed_time_uniform = bgfx.createUniform(
"_ElapsedTime", bgfx.UniformType.Vec4
)
self.speed_uniform = bgfx.createUniform("_Speed", bgfx.UniformType.Vec4)
self.velocity_size_uniform = bgfx.createUniform(
"_VelocitySize", bgfx.UniformType.Vec4
)
def _init_compute_kernels(self):
bgfx.setUniform(
self.particle_size_uniform,
as_void_ptr((c_float * 2)(self._particle_size[0], self._particle_size[1])),
)
bgfx.setUniform(
self.velocity_size_uniform,
as_void_ptr((c_float * 2)(self._velocity_size[0], self._velocity_size[1])),
)
bgfx.setBuffer(
TemplateConstants.PARTICLES_IN.value,
self._particles_buffer[self.PARTICLES_IN],
bgfx.Access.Write,
)
bgfx.setBuffer(
TemplateConstants.PARTICLES_OUT.value,
self._particles_buffer[self.PARTICLES_OUT],
bgfx.Access.Write,
)
def _create_buffers(self):
self._particles_buffer = [
create_buffer(self._num_cells, 1, self.vertex_layout),
create_buffer(self._num_cells, 1, self.vertex_layout),
]
def _load_compute_kernels(self):
self._add_particles_kernel = bgfx.createProgram(
load_shader(
"shader.AddParticle.comp", ShaderType.COMPUTE, root_path=root_path
),
True,
)
self._advect_particles_kernel = bgfx.createProgram(
load_shader(
"shader.AdvectParticle.comp", ShaderType.COMPUTE, root_path=root_path
),
True,
)
def _flip_buffer(self):
tmp = self.PARTICLES_IN
self.PARTICLES_IN = self.PARTICLES_OUT
self.PARTICLES_OUT = tmp
bgfx.setBuffer(
TemplateConstants.PARTICLES_IN.value,
self._particles_buffer[self.PARTICLES_IN],
bgfx.Access.Read,
)
bgfx.setBuffer(
TemplateConstants.PARTICLES_OUT.value,
self._particles_buffer[self.PARTICLES_OUT],
bgfx.Access.Write,
)
def destroy(self):
# Destroy uniforms
bgfx.destroy(self.particle_size_uniform)
bgfx.destroy(self.position_uniform)
bgfx.destroy(self.value_uniform)
bgfx.destroy(self.radius_uniform)
bgfx.destroy(self.dissipation_uniform)
bgfx.destroy(self.elapsed_time_uniform)
bgfx.destroy(self.speed_uniform)
bgfx.destroy(self.velocity_size_uniform)
# Destroy buffers
bgfx.destroy(self._particles_buffer[0])
bgfx.destroy(self._particles_buffer[1])
# Destroy compute shaders
bgfx.destroy(self._add_particles_kernel)
bgfx.destroy(self._advect_particles_kernel)
|
class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
one_bits_counts = [0]
for n in range(1, num + 1):
if n % 2 == 0:
extra = 0
else:
extra = 1
one_bits_counts.append(one_bits_counts[n // 2] + extra)
return one_bits_counts
def main():
sol = Solution()
print(sol.countBits(0))
print(sol.countBits(2))
print(sol.countBits(5))
if __name__ == '__main__':
main()
|
"""
A very simple yet meaningful optimal control program consisting in a pendulum starting downward and ending upward
while requiring the minimum of generalized forces. The solver is only allowed to move the pendulum sideways.
This simple example is a good place to start investigating explicit and implicit dynamics. In implicit dynamics the
joint accelerations qddot are extra controls, so u = [tau, qddot]^T. A dynamics constraint is also enforced at each
shooting node such that InverseDynamics(q, qdot, qddot) - tau = 0.
Finally, once it finished optimizing, it animates the model using the optimal solution.
"""
import biorbd_casadi as biorbd
from bioptim import (
OptimalControlProgram,
DynamicsFcn,
Dynamics,
Bounds,
QAndQDotBounds,
InitialGuess,
ObjectiveFcn,
OdeSolver,
CostType,
Solver,
BoundsList,
ObjectiveList,
)
import matplotlib.pyplot as plt
import numpy as np
def prepare_ocp(
biorbd_model_path: str,
final_time: float,
n_shooting: int,
ode_solver: OdeSolver = OdeSolver.RK1(n_integration_steps=1),
use_sx: bool = False,
n_threads: int = 1,
implicit_dynamics: bool = False,
) -> OptimalControlProgram:
"""
The initialization of an ocp
Parameters
----------
biorbd_model_path: str
The path to the biorbd model
final_time: float
        The time in seconds required to perform the task
n_shooting: int
        The number of shooting points to define in the direct multiple shooting program
    ode_solver: OdeSolver = OdeSolver.RK1(n_integration_steps=1)
Which type of OdeSolver to use
use_sx: bool
        If the SX variable should be used instead of MX (can be expensive on RAM)
n_threads: int
        The number of threads to use for parallel computing (1 = no parallel computing)
implicit_dynamics: bool
        If the dynamics should be formulated implicitly (qddot becomes an extra control)
Returns
-------
The OptimalControlProgram ready to be solved
"""
biorbd_model = biorbd.Model(biorbd_model_path)
objective_functions = ObjectiveList()
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau")
# Dynamics
dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN, implicit_dynamics=implicit_dynamics)
# Path constraint
tau_min, tau_max, tau_init = -100, 100, 0
    # Be careful not to bound the accelerations too tightly, or the implicit dynamics may not find the same solution
if implicit_dynamics:
qddot_min, qddot_max, qddot_init = -1000, 1000, 0
x_bounds = BoundsList()
x_bounds.add(bounds=QAndQDotBounds(biorbd_model))
x_bounds[0][:, [0, -1]] = 0
x_bounds[0][1, -1] = 3.14
# Initial guess
n_q = biorbd_model.nbQ()
n_qdot = biorbd_model.nbQdot()
n_qddot = biorbd_model.nbQddot()
n_tau = biorbd_model.nbGeneralizedTorque()
x_init = InitialGuess([0] * (n_q + n_qdot))
# Define control path constraint
# There are extra controls in implicit dynamics which are joint acceleration qddot.
if implicit_dynamics:
u_bounds = Bounds([tau_min] * n_tau + [qddot_min] * n_qddot, [tau_max] * n_tau + [qddot_max] * n_qddot)
else:
u_bounds = Bounds([tau_min] * n_tau, [tau_max] * n_tau)
    u_bounds[1, :] = 0  # Prevent the model from actively rotating
if implicit_dynamics:
u_init = InitialGuess([0] * (n_tau + n_qddot))
else:
u_init = InitialGuess([0] * n_tau)
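    # Note (added): in implicit dynamics the control vector is u = [tau, qddot],
    # so the bounds and initial guesses above are sized n_tau + n_qddot instead of n_tau.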
return OptimalControlProgram(
biorbd_model,
dynamics,
n_shooting,
final_time,
x_init=x_init,
u_init=u_init,
x_bounds=x_bounds,
u_bounds=u_bounds,
objective_functions=objective_functions,
ode_solver=ode_solver,
use_sx=use_sx,
n_threads=n_threads,
)
def solve_ocp(implicit_dynamics: bool) -> OptimalControlProgram:
"""
The initialization of ocp with implicit_dynamics as the only argument
Parameters
----------
implicit_dynamics: bool
        If the dynamics should be formulated implicitly
Returns
-------
    The solution of the solved OptimalControlProgram
"""
model_path = "models/pendulum.bioMod"
n_shooting = 200 # The higher it is, the closer implicit and explicit solutions are.
ode_solver = OdeSolver.RK2(n_integration_steps=1)
time = 1
# --- Prepare the ocp with implicit dynamics --- #
ocp = prepare_ocp(
biorbd_model_path=model_path,
final_time=time,
n_shooting=n_shooting,
ode_solver=ode_solver,
implicit_dynamics=implicit_dynamics,
)
# --- Custom Plots --- #
ocp.add_plot_penalty(CostType.ALL)
# --- Solve the ocp --- #
sol_opt = Solver.IPOPT(show_online_optim=False)
sol = ocp.solve(sol_opt)
return sol
def prepare_plots(sol_implicit, sol_explicit):
plt.figure()
tau_ex = sol_explicit.controls["tau"][0, :]
tau_im = sol_implicit.controls["tau"][0, :]
plt.plot(tau_ex, label="tau in explicit dynamics")
plt.plot(tau_im, label="tau in implicit dynamics")
plt.xlabel("frames")
plt.ylabel("Torque (Nm)")
plt.legend()
plt.figure()
cost_ex = np.sum(sol_explicit.cost)
cost_im = np.sum(sol_implicit.cost)
plt.bar([0, 1], width=0.3, height=[cost_ex, cost_im])
plt.xticks([0, 1], ["explicit", "implicit"])
plt.ylabel(" weighted cost function")
plt.figure()
time_ex = np.sum(sol_explicit.real_time_to_optimize)
time_im = np.sum(sol_implicit.real_time_to_optimize)
plt.bar([0, 1], width=0.3, height=[time_ex, time_im])
plt.xticks([0, 1], ["explicit", "implicit"])
plt.ylabel("time (s)")
plt.show()
def main():
"""
    The pendulum ocp is solved twice, with implicit and explicit dynamics, and the results are plotted for comparison
"""
# --- Prepare the ocp with implicit and explicit dynamics --- #
sol_implicit = solve_ocp(implicit_dynamics=True)
sol_explicit = solve_ocp(implicit_dynamics=False)
# --- Show the results in a bioviz animation --- #
sol_implicit.print()
# sol_implicit.animate(n_frames=100)
# sol_implicit.graphs()
# --- Show the results in a bioviz animation --- #
sol_explicit.print()
# sol_explicit.animate(n_frames=100)
# sol_explicit.graphs()
    # The implicit and explicit torques get closer as the dynamics are discretized
    # more finely, i.e. the higher n_shooting is, the closer the torques are.
prepare_plots(sol_implicit, sol_explicit)
if __name__ == "__main__":
main()
|
"""
Given a 2-D matrix representing an image, a location of a pixel in
the screen and a color C, replace the color of the given pixel and all adjacent same colored pixels with C.
For example, given the following matrix, and location pixel of (2, 2), and 'G' for green:
B B W
W W W
W W W
B B B
Becomes
B B G
G G G
G G G
B B B
"""
# key idea : flood-fill algo with 8 connectivity components
connectivity = [
(-1, 0), # North
(-1, 1), # North-East
(0, 1), # East
(1, 1), # South-East
(1, 0), # South
(1, -1), # South-West
(0, -1), # West
(-1, -1), # North-West
]
def my_flood_fill(mat, loc, new_color):
    r, c = loc
    curr_color = mat[r][c]
    if curr_color == new_color:
        return  # nothing to do; recursing here would never terminate
    mat[r][c] = new_color
for row, col in connectivity:
if -1 < r+row < len(mat) and -1 < c+col < len(mat[1]) and mat[r+row][c+col] == curr_color:
my_flood_fill(mat, (r+row, col+c), new_color)
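# Note (added): each cell is visited at most once because it is recolored before
# its neighbours are explored; for very large images an explicit stack would
# avoid hitting Python's recursion limit.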
if __name__ == '__main__':
mat = [
['B', 'B', 'W'],
['W', 'W', 'W'],
['W', 'W', 'W'],
['B', 'B', 'B']
]
print(*mat, sep='\n', end='\n'+'---'*5+'\n')
my_flood_fill(mat, (2,2), 'G')
    print(*mat, sep='\n', end='\n'+'---'*5+'\n')
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
import funcoes as fun
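# Note (added): the helpers below scan a text for Portuguese keywords
# (school: "escola"; a list of sports; fight/war/battle stems: "lut", "guerr",
# "batalha"; friendship stems: "amig", "amiz", ...; game stem: "jog") and each
# returns the matched terms plus how many were found; proc_inter aggregates them.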
def verifica_escola(texto):
result=""
contador=0
substring1 = "escola"
if substring1 in texto:
result+=substring1+" "
contador+=1
return result,contador
def encontra_esportes(texto):
result=""
contador=0
pesquisar_esportes = ['futebol', 'volei', 'tênis de mesa', 'natação', 'futsal', 'capoeira', 'skate', 'skatismo', 'surf', 'vôlei de praia', 'badminton', 'frescobol', 'judô', 'atletismo', 'críquete', 'basquete', 'hockey na grama', 'hockey no gelo', 'beisebol', 'fórmula 1', 'Rugby', 'futebol americano', 'golfe', 'handebol', 'queimado', 'hipismo', 'ginástica olímpica', 'Triatlo', 'maratona', 'canoagem', 'peteca', 'jiu-jitsu', 'esgrima', 'vale-tudo', 'karatê', 'corrida', 'ciclismo', 'boxe', 'MMA', 'Taekwondo']
# print(len(pesquisar_esportes))
for i in pesquisar_esportes:
if i in texto:
result+=i+" "
contador+=1
return result,contador
def busca_batalha_luta_guerra(texto):
result=""
contador=0
palavras_crivo_intermediario = ['lut','guerr','batalha']
# palavras_crivo_intermediario = ''.join(palavras_crivo_intermediario_inicio)
# palavras_crivo_intermediario = palavras_crivo_intermediario_inicio.split()
# palavras_crivo_intermediario_encontradas = []
for i in palavras_crivo_intermediario:
if i in texto:
result+=i+" "
contador+=1
return result,contador
def busca_amizade(texto):
result=""
contador=0
palavras_crivo_amizade = ['amig', 'amiz', 'migux','amigos','amigas']
for i in palavras_crivo_amizade:
if i in texto:
result+=i+" "
contador+=1
return result,contador
def busca_jogo(texto):
result=""
contador=0
substring = "jog"
if substring in texto:
result+=substring+" "
contador+=1
return result,contador
def proc_inter(texto):
result=""
contador=0
r,c =encontra_esportes(texto)
result+=r
contador+=c
r,c =busca_batalha_luta_guerra(texto)
result+=r
contador+=c
r,c =busca_amizade(texto)
result+=r
contador+=c
r,c =busca_jogo(texto)
result+=r
contador+=c
    return result,contador
|
"""
PF = Picture Frame
EW = Eye White
EC = Eye Color
BC = Body Color
OT = OutLine
BG = BackGround
BK = BeaK
CH = Christmas Hat
"""
import random
# color generator for common rarity
def common():
PF = [0, 0, 0]
EW = [255, 255, 255]
EC = [random.randint(0, 250), random.randint(0, 250), random.randint(0, 250)]
BC = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
OT = [random.randint(100, 250), random.randint(100, 250), random.randint(100, 250)]
BG = [250, 249, 213]
BK = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
return PF, EW, EC, BC, OT, BG, BK
# color generator for rare rarity
def rare():
eyeWhiteColors = [255, 0]
eyeWhite = random.choice(eyeWhiteColors)
PF = [0, 0, 0]
EW = [50, 50, 50]
EC = [random.randint(100, 255), random.randint(0, 255), random.randint(0, 255)]
BC = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
OT = [255 - BC[0], 255 - BC[1], 255 - BC[2]]
BG = [250, 249, 213]
BK = [255 - EC[0], 255 - EC[1], 255 - EC[2]]
return PF, EW, EC, BC, OT, BG, BK
# color generator for legendary rarity
def legendary_r():
eyeWhiteColors = [255, 0]
eyeWhite = random.choice(eyeWhiteColors)
PF = [0, 0, 0]
EW = [255, 255, 255]
EC = [random.randint(50, 255), 0, 0]
BC = [random.randint(150, 255), 0, 0]
OT = [random.randint(100, 150), 0, 0]
BG = [random.randint(0, 50), 0, 0]
BK = [random.randint(50, 100), 0, 0]
return PF, EW, EC, BC, OT, BG, BK
# color generator for legendary rarity
def legendary_g():
eyeWhiteColors = [255, 0]
eyeWhite = random.choice(eyeWhiteColors)
PF = [0, 0, 0]
EW = [255, 255, 255]
EC = [0, random.randint(50, 255), 0]
BC = [0, random.randint(150, 255), 0]
OT = [0, random.randint(100, 150), 0]
BG = [0, random.randint(0, 50), 0]
BK = [0, random.randint(50, 100), 0]
return PF, EW, EC, BC, OT, BG, BK
# color generator for legendary rarity
def legendary_b():
eyeWhiteColors = [255, 0]
eyeWhite = random.choice(eyeWhiteColors)
PF = [0, 0, 0]
EW = [255, 255, 255]
EC = [0, 0, random.randint(50, 255)]
BC = [0, 0, random.randint(150, 255)]
OT = [0, 0, random.randint(100, 150)]
BG = [0, 0, random.randint(0, 50)]
BK = [0, 0, random.randint(50, 100)]
return PF, EW, EC, BC, OT, BG, BK
# color generator for classified rarity
def classified_blk():
PF = [0, 0, 0]
EW = [255, 255, 255]
ECr = random.randint(0, 150)
EC = [ECr, ECr, ECr]
BCr = random.randint(0, 150)
BC = [BCr, BCr, BCr]
OT = [0, 0, 0]
BG = [250, 249, 213]
BKr = random.randint(0, 150)
BK = [BKr, BKr, BKr]
return PF, EW, EC, BC, OT, BG, BK
# color generator for classified rarity
def classified_wht():
PF = [0, 0, 0]
EW = [80, 80, 80]
ECr = random.randint(120, 200)
EC = [ECr, ECr, ECr]
BCr = random.randint(120, 210)
BC = [BCr, BCr, BCr]
OT = [255, 255, 255]
BG = [28, 28, 28]
BKr = random.randint(160, 210)
BK = [BKr, BKr, BKr]
return PF, EW, EC, BC, OT, BG, BK
# color generator for holiday christmas
def christmas():
PF = [0, 0, 0]
EW = [255, 255, 255]
EC = [random.randint(100, 255), random.randint(0, 255), random.randint(0, 255)]
BC = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
OT = [255 - EC[0], 255 - EC[1], 255 - EC[2]]
BG = [40, 40, 40]
BK = [255 - BC[0], 255 - BC[1], 255 - BC[2]]
return PF, EW, EC, BC, OT, BG, BK
# color generator for upside down image
def upsidedown():
PF = [0, 0, 0]
EW = [0, 0, 0]
EC = [random.randint(100, 255), random.randint(0, 255), random.randint(0, 255)]
BC = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
OT = [255 - EC[0], 255 - EC[1], 255 - EC[2]]
BG = [250, 249, 213]
BK = [255 - BC[0], 255 - BC[1], 255 - BC[2]]
return PF, EW, EC, BC, OT, BG, BK
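# Usage sketch (added, not part of the original script): every generator returns
# the palette in the same order, so a caller can unpack it like
#
#     PF, EW, EC, BC, OT, BG, BK = random.choice([common, rare, christmas])()
#
# and feed the RGB triples to whatever drawing code builds the image.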
|
# The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
class Solution(object):
def findCelebrity(self, n):
"""
:type n: int
:rtype: int
"""
candidate=0
        for i in range(n):
            # keep moving to the last person the current candidate knows;
            # anyone who knows somebody cannot be the celebrity
            if knows(candidate,i):
                candidate=i
        # after the loop the candidate knows nobody with a larger index,
        # so only the smaller indices still need to be checked
        if any(knows(candidate,i) for i in range(candidate)):
            return -1
        # the celebrity must be known by everybody
        if not all(knows(i,candidate) for i in range(n)):
return -1
return candidate
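    # Complexity note (added): knows() is called at most n times in the scan and
    # at most n times in each verification pass, so the whole check is O(n)
    # calls even though there are n*(n-1) possible pairs.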
|
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import dash_table
import numpy as np
# import functions from .py files
import app_graphing as app_gr
import app_wrangling as app_wr
# load board game data
boardgame_data = app_wr.call_boardgame_data()
# dictionary for tab 1 slider
max_year = boardgame_data["year_published"].max().year
slider_dict = {x: str(x) for x in range(1950, (max_year + 1), 5)}
# dictionary for dropdowns
col_key_list = ["category", "mechanic", "publisher"]
col_value_list = [app_wr.subset_data(boardgame_data, v) for v in col_key_list]
col_dict = dict(zip(col_key_list, col_value_list))
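# Note (added): col_dict maps each dropdown type to its available options, e.g.
# {"category": [...], "mechanic": [...], "publisher": [...]}; the option lists
# are hypothetical here, they come from app_wr.subset_data at runtime.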
# radio dict
radio_options = [
{"label": " Categories", "value": "category"},
{"label": " Mechanics", "value": "mechanic"},
{"label": " Publishers", "value": "publisher"},
]
# extents for 3D plot
extents_3d = {
"min_x": boardgame_data["x"].min(),
"max_x": boardgame_data["x"].max(),
"min_y": boardgame_data["y"].min(),
"max_y": boardgame_data["y"].max(),
"min_z": boardgame_data["z"].min(),
"max_z": boardgame_data["z"].max(),
}
# title for all tabs
def title():
"""
:return: A Div containing dashboard title.
"""
return html.Div(
children=[html.H1("Board Game Data Explorer", style={"font-weight": "normal"})]
)
# description card tab 1
def description_card_tab1():
"""
:return: A Div containing welcome message and descriptions on tab 1.
"""
return html.Div(
id="description-card-tab1",
children=[
html.H5("Welcome to the Board Game Data Explorer"),
dcc.Markdown(
id="intro1",
children="Use the tab selectors above to view either \
**Game Trends**, **Top Games**, or the **3D Game Explorer**. \
Descriptions are available on each tab using the **button** \
to the right. A **dataset** description is also available.",
),
],
)
# Data set description for tab 1
def data_set_desc_tab1():
"""
:return: A Div containing description of the data set,
which pops out in the tab 1 modal.
"""
return html.Div(
children=[
html.H4("Description of Dataset"),
dcc.Markdown(
"This dataset comes from the [BoardGameGeek](https://boardgamegeek.com/) \
website and includes board games with descriptions, general game \
details, publisher, and user ratings.",
),
dcc.Markdown(
"Note that he dataset is filtered to remove any games with less than \
50 user ratings and is limited to games published after 1950."
),
]
)
# tab 1 description for modal button
def tab_1_description():
"""
:return: A Div containing description of tab 1
that goes in the pop out modal on tab 1.
"""
return html.Div(
children=[
html.H4("Game Trends"),
dcc.Markdown(
"Select Categories, Mechanics, or Publishers and filter on the \
minimum number of user ratings for a board game."
),
html.H5("Timeseries Subtab"),
dcc.Markdown(
"**Average Game Ratings** provides a plot of each game's \
average user rating vs published year. Note that \
the blue trendline represents the annual average of all \
published games regardless of user selection."
),
dcc.Markdown(
"**Game Counts** provides the total count of games published in each year \
based on the user selections."
),
html.H5("Density Subtab"),
dcc.Markdown(
"View user rating density plots based on user selections. Also allows \
                subsetting the data based on a year range using a slider."
),
dcc.Markdown(
"If no elements are selected from the dropdown menu, the top 5 Categories, \
Mechanics or Publishers based on the mean average user rating will \
be displayed."
),
]
)
# tab 2 description for modal button
def tab_2_description():
"""
:return: A Div containing description of tab 2
that goes in the pop out modal on tab 2.
"""
return html.Div(
children=[
html.H4("Top Games"),
dcc.Markdown(
"This tab allows users to select any combination of game categories, \
mechanics and publishers as well as the minimum number of user ratings \
for a game. The top 10 games containing all selections and ranked by \
user rating will be displayed."
),
dcc.Markdown(
"Below the barchart is also a button which will show a table of \
information for the 10 games."
),
]
)
# tab 3 description for modal button
def tab_3_description():
"""
:return: A Div containing description of tab 3
that goes in the pop out modal on tab 3.
"""
return html.Div(
children=[
html.H4("3D Game Explorer"),
dcc.Markdown(
"The 3D Game Explorer shows a visual representation of game \
similarity based on game categories, mechanics, and user ratings."
),
dcc.Markdown(
"The graph was generated using a \
[t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding)\
analysis. Game similarity between mechanics and categories is shown on \
the horizontal (x and y axes) and game similarity in user ratings \
(both counts and average rating) is represented on the vertical \
(z-axis)."
),
dcc.Markdown(
"Clicking on any game will provide game details at the bottom of the left\
column."
),
]
)
# control card for tab 1
def generate_control_card_tab1():
"""
:return: A Div containing controls for graphs on tab 1,
which go on the control card on the left.
"""
return html.Div(
id="control-card-tab1",
children=[
html.H6("Select:"),
dcc.RadioItems(
id="radio-selection-tab1",
options=radio_options,
value="mechanic",
labelStyle={"display": "block"},
persistence=False,
),
html.Br(),
html.H6("Select elements to view:"),
dcc.Dropdown(id="radio-dependent-tab1", options=[], multi=True, value=[]),
html.Br(),
html.H6("Select minumum number of ratings:"),
dcc.Slider(
id="min-num-ratings",
min=0,
max=10000,
step=100,
value=5000,
marks={0: "0", 5000: "5000", 10000: "10000"},
),
html.Br(),
html.Div(id="slider-output-container_2"),
],
)
# control card for tab 2
def generate_control_card_tab2():
"""
:return: A Div containing controls for graphs on tab 2,
which go on the control card on the left.
"""
return html.Div(
id="control-card-tab2",
children=[
html.H6("Select categories:"),
dcc.Dropdown(
id="category-widget-tab2",
value="",
options=[
{"label": name, "value": name} for name in col_dict["category"]
],
multi=True,
),
html.Br(),
html.H6("Select mechanics:"),
dcc.Dropdown(
id="mechanics-widget-tab2",
value="",
options=[
{"label": name, "value": name} for name in col_dict["mechanic"]
],
multi=True,
),
html.Br(),
html.H6("Select publishers:"),
dcc.Dropdown(
id="publisher-widget-tab2",
value="",
options=[
{"label": name, "value": name} for name in col_dict["publisher"]
],
multi=True,
),
html.Br(),
html.H6("Select minumum number of ratings:"),
dcc.Slider(
id="min-num-ratings2",
min=0,
max=10000,
step=100,
value=5000,
marks={0: "0", 5000: "5000", 10000: "10000"},
),
html.Br(),
html.Div(id="slider-output-container_3"),
],
)
# control card for tab 3
def generate_control_card_tab3():
"""
:return: A Div containing controls for graphs on tab 3,
which go on the control card on the left.
"""
return html.Div(
id="control-card-tab3",
children=[
html.H6("Select:"),
dcc.RadioItems(
id="radio-selection-tab3",
options=radio_options,
value="category",
labelStyle={"display": "block"},
),
html.Br(),
html.H6("Select elements to view:"),
dcc.Dropdown(
id="radio-dependent-tab3",
options=[],
multi=True,
value=["Negotiation", "Farming"],
),
html.Br(),
html.H6("Select game to highlight:"),
dcc.Dropdown(
id="games-dependent-tab3", options=[], multi=False, value=None
),
],
)
# sub-title card tab 1
sub_title_card_1 = dbc.Card(
dbc.CardBody(
[
html.Div(
[
dbc.Row(
[
dbc.Col(description_card_tab1(), width=8),
dbc.Col(
[
html.Div(
[
dbc.Button(
"Dataset Description",
id="open",
style={"margin-left": "15px"},
),
dbc.Modal(
[
dbc.ModalBody(data_set_desc_tab1()),
dbc.ModalFooter(
dbc.Button(
"Close",
id="close",
className="ml-auto",
)
),
],
id="modal",
),
html.Br(),
html.Br(),
html.Div(
[
dbc.Button(
"Game Trends Tab Description ",
id="open2",
style={"margin-left": "15px"},
),
dbc.Modal(
[
dbc.ModalBody(
tab_1_description()
),
dbc.ModalFooter(
dbc.Button(
"Close",
id="close2",
className="ml-auto",
)
),
],
id="modal2",
),
]
),
]
),
]
),
],
),
]
)
]
),
color="#F3F2F2",
)
# subtitle card tab 2
sub_title_card_2 = dbc.Card(
dbc.CardBody(
dbc.Row(
[
dbc.Col(
[
html.H5("Top Board Games"),
html.P(
"Use the interactive features below to view the top \
10 games based on choice of game categories, mechanics \
and publishers. "
),
],
width=9,
),
dbc.Col(
[
dbc.Button("Top Games Tab Description", id="open3"),
dbc.Modal(
[
dbc.ModalBody(tab_2_description()),
dbc.ModalFooter(
dbc.Button(
"Close", id="close3", className="ml-auto"
)
),
],
id="modal3",
),
],
width=3,
),
]
)
),
color="#F3F2F2",
)
# subtitle card tab 3
sub_title_card_3 = dbc.Card(
dbc.CardBody(
dbc.Row(
[
dbc.Col(
[
html.H5("Board Game Explorer"),
dcc.Markdown(
"Select either categories, mechanics or publishers.\
Then select different elements to view on the\
following figure. **Click** any game on the plot to\
view details in the left column. **Larger** points \
indicate games with higher user ratings scores."
),
],
width=9,
),
dbc.Col(
[
dbc.Button("3D Game Explorer Description", id="open4"),
dbc.Modal(
[
dbc.ModalBody(tab_3_description()),
dbc.ModalFooter(
dbc.Button(
"Close", id="close4", className="ml-auto"
)
),
],
id="modal4",
),
],
width=3,
),
]
)
),
color="#F3F2F2",
)
# card 1 containing the pop over "how to use tab 1" instructions and
# control card for tab 1
first_card_tab1 = dbc.Card(
dbc.CardBody(
[
html.Div(
id="control-card-tab-1",
children=[
html.Div(
[
dbc.Button("How to use", id="popover-target", color="info"),
dbc.Popover(
[
dbc.PopoverHeader(
"How To Use The Game Trends Tab?"
),
dbc.PopoverBody(
"Select either categories, mechanics or \
publishers below, then select elements \
from the drop-down to visualize on this \
tab. Use the slider to select the minimum \
                                number of ratings for the games shown in the \
visualization. In the density plot tab,\
choose a time period by dragging the \
interactive slider. "
),
],
id="popover",
is_open=False,
target="popover-target",
),
]
),
html.Br(),
generate_control_card_tab1(),
],
)
]
),
color="#F3F2F2",
)
# card 2 for tab 1 containing the two plots on upper portion of tab 1,
# the scatter plot and the counts stacked histogram
second_card_tab1 = dbc.Card(
dbc.CardBody(
dbc.Tabs(
[
dbc.Tab(
label="Timeseries Plots",
children=(
[
html.Div(
[
dbc.Col(
[
html.Br(),
html.H5("Average Game Ratings"),
html.H6(
"Blue line shows annual average of \
ALL games in dataset"
),
html.Iframe(
# scatter plot
id="scatter",
style={
"border-width": "0",
"width": "100%",
"height": "250px",
},
),
html.Br(),
html.H5("Published Game Counts"),
html.Iframe(
# stacked histogram
id="count",
style={
"border-width": "0",
"width": "100%",
"height": "250px",
},
),
],
width={"size": 10, "offset": 1},
),
html.Br(),
]
)
]
),
),
dbc.Tab(
label="Density Plot",
children=(
html.Div(
[
dbc.Row(
dbc.Col(
[
html.Div(
[
html.Br(),
html.H5("Game Density Plots"),
html.Br(),
html.Div(
id="top-range-slider-output",
),
html.Br(),
html.Br(),
html.Div(
dcc.RangeSlider(
id="top-range-slider",
min=1950,
max=2016,
step=1,
value=[1990, 2010],
marks=slider_dict,
),
style={
"width": "60%",
"display": "inline-block",
"align-items": "center",
"justify-content": "center",
},
),
html.Br(),
html.Br(),
html.Br(),
html.Iframe(
id="density_plot",
style={
"border-width": "0",
"width": "1050px",
"height": "550px",
},
),
],
style={
"display": "inline-block",
"align-items": "center",
"justify-content": "center",
},
)
],
width={"size": 6, "offset": 1},
)
)
]
)
),
),
]
)
),
color="#F3F2F2",
)
# card 1 for tab 2 containing the pop over "how to use tab 2" instructions and
# control card for tab 2
first_card_tab2 = dbc.Card(
dbc.CardBody(
[
html.Div(
id="control-card-tab-2",
children=[
html.Div(
[
dbc.Button(
"How to use", id="popover-target2", color="info"
),
dbc.Popover(
[
dbc.PopoverHeader("How To Use The Top Games Tab?"),
dbc.PopoverBody(
"Select any combination of game categories, mechanics \
and publishers in the dropdowns below to populate \
                            the Top Games bar chart. Use the slider to filter \
                            based on minimum number of user ratings for a game."
),
],
id="popover2",
is_open=False,
target="popover-target2",
),
]
),
html.Br(),
generate_control_card_tab2(),
],
),
]
),
color="#F3F2F2",
style={"height": "30em"},
)
# Tab 2 card containing the top 10 games bar chart for tab 2
top_n_games_card_tab2 = dbc.Card(
dbc.CardBody(
[
html.Br(),
dbc.Col(
[
html.H5("Top 10 Games"),
html.Div(
html.Iframe(
id="top-n-games",
style={
"border-width": "0",
"width": "100%",
"height": "1000px",
},
),
),
],
width={"size": 12, "offset": 1},
),
]
),
color="#F3F2F2",
style={"height": "30rem"},
)
# table card
top_n_games_table_card_tab2 = dbc.Card(
dbc.CardBody(
[
html.Div(
[
dbc.Button(
"View Game Details",
id="collapse-button",
className="mb-3",
color="primary",
),
dbc.Collapse(
dbc.Card(
dbc.CardBody(
[
html.H5("Top 10 Games Facts Table:"),
dash_table.DataTable(
id="top-n-games-datatable",
style_cell={
"whiteSpace": "normal",
"height": "auto",
"font-family": "Verdana",
"font-size": 10,
},
style_table={"overflowY": "scroll"},
sort_action="native",
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": "rgb(248, 248, 248)",
}
],
style_header={
"backgroundColor": "rgb(230, 230, 230)",
"fontWeight": "bold",
},
),
],
style={"height": "40rem", "width": "90rem"},
),
color="#F3F2F2",
),
id="collapse",
),
],
)
]
),
color="#F3F2F2",
)
# card 1 for tab 3 containing the pop over "how to use tab 2" instructions and
# control card for tab 3
control_card_tab3 = dbc.Card(
dbc.CardBody(
[
html.Div(
[
dbc.Button("How to use", id="popover-target3", color="info"),
dbc.Popover(
[
dbc.PopoverHeader("How To Use The 3D Game Explorer Tab?"),
dbc.PopoverBody(
"Select game categories, mechanics and publishers or populate \
the 3D plot. Click and drag to move around the \
3D plot, and use the mouse to zoom. Simply hover \
over and click a game to view game facts."
),
],
id="popover3",
is_open=False,
target="popover-target3",
),
]
),
html.Br(),
html.Div(
id="left-column-tab3",
className="four columns", # not sure this is needed
children=[generate_control_card_tab3()],
),
html.Br(),
html.H5("Selected Game Facts:"),
html.H6("Name and Rating:"),
html.Div(id="tsne-data-out-name"),
html.Div(id="tsne-data-out-score"),
html.Div(id="tsne-data-out-ratings"),
html.Br(),
html.H6("Categories:"),
html.Div(id="tsne-data-out-categories"),
html.Br(),
html.H6("Mechanics:"),
html.Div(id="tsne-data-out-mechanics"),
html.Br(),
html.H6("Publishers:"),
html.Div(id="tsne-data-out-publishers"),
]
),
color="#F3F2F2",
)
# card the tsne plot on tab 3
tab_3_plot = dbc.Card(
dbc.CardBody(
[
html.Div(
[
html.Br(),
html.H5("3D Game Explorer"),
dcc.Graph(id="tsne-3d-plot", style={"height": "80vh"}),
]
),
]
),
color="#F3F2F2",
)
# tab styling features for layout
tabs_styles = {"height": "44px" ""}
tab_style = {
"borderBottom": "1px solid #d6d6d6",
"padding": "6px",
"fontWeight": "bold",
}
tab_selected_style = {
"borderTop": "1px solid #d6d6d6",
"borderBottom": "1px solid #d6d6d6",
"backgroundColor": "#119DFF",
"color": "white",
"padding": "6px",
}
# set up app stylesheet and server
app = dash.Dash(
__name__,
title="Board Game Data Explorer",
external_stylesheets=[dbc.themes.BOOTSTRAP],
)
server = app.server
# app layout
app.layout = html.Div(
dbc.Container(
html.Div(
[ # dashboard title
dbc.Row(
[
dbc.Col(
[
html.Div(
id="title-top",
children=[title()],
style={"backgroundColor": "#DDDCDC"},
)
]
)
]
),
dcc.Tabs(
[
dcc.Tab(
label="Game Trends",
children=[
html.Div(
[
dbc.Row([((dbc.Col(sub_title_card_1)))]),
html.Br(),
dbc.Row(
[
dbc.Col(first_card_tab1, width=3),
dbc.Col(second_card_tab1, width=9),
]
),
]
)
],
style=tab_style,
selected_style=tab_selected_style,
),
dcc.Tab(
label="Top Games",
children=[
html.Div(
[
dbc.Row([((dbc.Col(sub_title_card_2)))]),
html.Br(),
dbc.Row(
[
dbc.Col(first_card_tab2, width=3),
dbc.Col(
[
top_n_games_card_tab2,
],
width=9,
),
]
),
html.Br(),
dbc.Row(
dbc.Col(top_n_games_table_card_tab2),
),
],
style={"height": 1250},
)
],
style=tab_style,
selected_style=tab_selected_style,
),
dcc.Tab(
label="3D Game Explorer",
children=[
html.Div(
[
dbc.Row(dbc.Col(sub_title_card_3)),
html.Br(),
dbc.Row(
[
dbc.Col(control_card_tab3, width=3),
dbc.Col(tab_3_plot, width=9),
]
),
]
)
],
style=tab_style,
selected_style=tab_selected_style,
),
]
),
],
style={"backgroundColor": "#DDDCDC"},
),
fluid=True,
),
style={"backgroundColor": "#DDDCDC"},
)
# Set up callbacks/backend
# modal data set description
@app.callback(
Output("modal", "is_open"),
[Input("open", "n_clicks"), Input("close", "n_clicks")],
[State("modal", "is_open")],
)
def toggle_modal_description(n1, n2, is_open):
"""
:return: Open modal callback if user clicks data set description button on tab 1.
"""
if n1 or n2:
return not is_open
return is_open
# modal tab 1 description
@app.callback(
Output("modal2", "is_open"),
[Input("open2", "n_clicks"), Input("close2", "n_clicks")],
[State("modal2", "is_open")],
)
def toggle_modal_tab1(n1, n2, is_open):
"""
:return: Open modal callback if user clicks tab 1 description button on tab 1.
"""
if n1 or n2:
return not is_open
return is_open
# Button over select for how to use tab 1 on control card
@app.callback(
Output("popover", "is_open"),
[Input("popover-target", "n_clicks")],
[State("popover", "is_open")],
)
def toggle_popover_tab1(n, is_open):
"""
:return: Open pop-over callback for how to use button for tab 1.
"""
if n:
return not is_open
return is_open
# radio button selection options to populate drop downs for tab1
@app.callback(
dash.dependencies.Output("radio-dependent-tab1", "options"),
dash.dependencies.Output("radio-dependent-tab1", "value"),
[dash.dependencies.Input("radio-selection-tab1", "value")],
)
def update_options_tab1(chosen_selection):
"""
:return: Callback to generate drop down based on radio button selection.
"""
col = chosen_selection
return [{"label": c, "value": c} for c in col_dict[col]], []
# scatter plot tab 1
@app.callback(
Output("scatter", "srcDoc"),
Input("radio-selection-tab1", "value"),
Input("radio-dependent-tab1", "value"),
Input("min-num-ratings", "value"),
)
def call_scatter_tab1(col, list_, n_ratings):
"""
:return: Scatter plot of game ratings on tab 1.
"""
chart = app_gr.scatter_plot_dates(boardgame_data, col, list_, n_ratings)
return chart.to_html()
# stacked histogram of counts annual published counts
@app.callback(
Output("count", "srcDoc"),
Input("radio-selection-tab1", "value"),
Input("radio-dependent-tab1", "value"),
Input("min-num-ratings", "value"),
)
def call_counts_tab1(col, list_, n_ratings):
"""
:return: Bar chart of published game counts on tab 1.
"""
chart2 = app_gr.count_plot_dates(boardgame_data, col, list_, n_ratings)
return chart2.to_html()
# year range slider output tab 1
@app.callback(
dash.dependencies.Output("top-range-slider-output", "children"),
dash.dependencies.Input("top-range-slider", "value"),
)
def range_slider_select_tab1(value):
"""
:return: Tab 1 year range slider output years for density plot on tab 1.
"""
transformed_value = [v for v in value]
return "Years Selected: {} to {}".format(transformed_value[0], transformed_value[1])
# density plot tab 1
@app.callback(
Output("density_plot", "srcDoc"),
Input("radio-selection-tab1", "value"),
Input("radio-dependent-tab1", "value"),
Input("top-range-slider", "value"),
Input("min-num-ratings", "value"),
)
def call_density_tab1(col, list_, value1, value2):
"""
:return: Game rating density plot on tab 1.
"""
transformed_value = [v for v in value1]
val1 = transformed_value[0]
val2 = transformed_value[1]
density_chart = app_gr.rank_plot_density(
boardgame_data,
col,
list_,
year_in=int(val1),
year_out=int(val2),
n_ratings=value2,
)
return density_chart.to_html()
# modal for description tab 2
@app.callback(
Output("modal3", "is_open"),
[Input("open3", "n_clicks"), Input("close3", "n_clicks")],
[State("modal3", "is_open")],
)
def toggle_modal_tab(n1, n2, is_open):
"""
:return: Open modal callback if user clicks tab 2 description button on tab 2.
"""
if n1 or n2:
return not is_open
return is_open
# modal for description tab 3
@app.callback(
Output("modal4", "is_open"),
[Input("open4", "n_clicks"), Input("close4", "n_clicks")],
[State("modal4", "is_open")],
)
def toggle_modal_tab3(n1, n2, is_open):
"""
:return: Open modal callback if user clicks tab 3 description button on tab 3.
"""
if n1 or n2:
return not is_open
return is_open
# Button over select for how to use tab 2 on control card
@app.callback(
Output("popover2", "is_open"),
[Input("popover-target2", "n_clicks")],
[State("popover2", "is_open")],
)
def toggle_popover_tab2(n, is_open):
"""
:return: Open pop-over callback for how to use button for tab 2.
"""
if n:
return not is_open
return is_open
# Button over select for how to use tab 3 on control card
@app.callback(
Output("popover3", "is_open"),
[Input("popover-target3", "n_clicks")],
[State("popover3", "is_open")],
)
def toggle_popover_tab3(n, is_open):
"""
    :return: Open pop-over callback for how to use button for tab 3.
"""
if n:
return not is_open
return is_open
# top n games bar chart tab 2
@app.callback(
Output("top-n-games", "srcDoc"),
Input("category-widget-tab2", "value"),
Input("mechanics-widget-tab2", "value"),
Input("publisher-widget-tab2", "value"),
Input("min-num-ratings2", "value"),
)
def call_top_n_games_tab2(c, m, p, value2):
"""
:return: Top 10 games plot on tab 2.
"""
top_n_games = app_gr.top_n_plot(
data=boardgame_data,
cat=c,
mech=m,
pub=p,
n=10,
n_ratings=value2,
)
return top_n_games.to_html()
# data table top n games bar chart tab 2
@app.callback(
Output("top-n-games-datatable", "data"),
Output(component_id="top-n-games-datatable", component_property="columns"),
Input("category-widget-tab2", "value"),
Input("mechanics-widget-tab2", "value"),
Input("publisher-widget-tab2", "value"),
Input("min-num-ratings2", "value"),
)
def update_table_tab2(c, m, p, value2):
"""
    :return: Data frame columns and output to
populate data table(dcc.DataTable) on tab 2.
"""
list_cols = [
"name",
"min_players",
"max_players",
"min_playtime",
"max_playtime",
"year_published",
"category",
"mechanic",
"publisher",
"average_rating",
"users_rated",
]
table = app_wr.call_boardgame_filter(
data=boardgame_data, cat=c, mech=m, pub=p, n=10, n_ratings=value2
)
columns = [{"name": col, "id": col} for col in list_cols]
columns[0]["name"] = ("Game Name",)
columns[1]["name"] = "Min Players"
columns[2]["name"] = "Max Players"
columns[3]["name"] = "Min Playtime"
columns[4]["name"] = "Max Playtime"
columns[5]["name"] = "Year Published"
columns[6]["name"] = "Categories"
columns[7]["name"] = "Mechanics"
columns[8]["name"] = "Publishers"
columns[9]["name"] = "Avg. User Rating"
columns[10]["name"] = "No. Ratings"
table_out = app_wr.clean_table(table)
    data_out = table_out.to_dict("records")
return data_out, columns
# radio button selection options to populate dropdowns for tab3
@app.callback(
dash.dependencies.Output("radio-dependent-tab3", "options"),
[dash.dependencies.Input("radio-selection-tab3", "value")],
)
def update_options_tab3(chosen_selection):
"""
:return: Callback to generate radio buttons on tab 3.
"""
col = chosen_selection
return [{"label": c, "value": c} for c in col_dict[col]]
# radio button selection options to populate game dropdown for tab3
@app.callback(
Output("games-dependent-tab3", "options"),
[Input("radio-selection-tab3", "value"), Input("radio-dependent-tab3", "value")],
)
def update_games_tab3(col, list_):
"""
:return: Callback to generate drop down based on radio button selection on tab 3.
"""
if col == "category":
games = app_wr.call_boardgame_filter(boardgame_data, cat=list_)
elif col == "mechanic":
games = app_wr.call_boardgame_filter(boardgame_data, mech=list_)
else:
games = app_wr.call_boardgame_filter(boardgame_data, pub=list_)
return games["name"].map(lambda x: {"label": x, "value": x})
# tsne graph tab 3
@app.callback(
Output("tsne-3d-plot", "figure"),
Input("radio-selection-tab3", "value"),
Input("radio-dependent-tab3", "value"),
Input("games-dependent-tab3", "value"),
)
def call_tsne_tab3(col, list_, game):
"""
:return: Interactive TSNE plot tab 3.
"""
fig = app_gr.graph_3D(boardgame_data, col, list_, game, extents_3d)
return fig
# text output from tsne graph click
@app.callback(
Output("tsne-data-out-name", "children"),
Output("tsne-data-out-score", "children"),
Output("tsne-data-out-ratings", "children"),
Output("tsne-data-out-categories", "children"),
Output("tsne-data-out-mechanics", "children"),
Output("tsne-data-out-publishers", "children"),
Input("tsne-3d-plot", "clickData"),
)
def display_click_message_tab3(clickData):
"""
:return: Selected game data to put into control card on tab 3.
"""
if clickData:
click_point_np = np.array(
[clickData["points"][0][i] for i in ["x", "y", "z"]]
).astype(np.float64)
# Create a mask of the point clicked
bool_mask_click = boardgame_data.loc[:, "x":"z"].eq(click_point_np).all(axis=1)
        # retrieve data
if bool_mask_click.any():
data_out = boardgame_data[bool_mask_click]
click_name = data_out.name.values[0]
click_sc = "Avg Rating: " + str(round(data_out.average_rating.values[0], 2))
click_rat = "No. of Ratings: " + str(data_out.users_rated.values[0])
click_cat = ", ".join(data_out.category.values[0])
click_mec = ", ".join(data_out.mechanic.values[0])
click_pub = ", ".join(data_out.publisher.values[0])
return click_name, click_sc, click_rat, click_cat, click_mec, click_pub
return None, None, None, None, None, None
# slider output container first tab
@app.callback(
dash.dependencies.Output("slider-output-container_2", "children"),
[dash.dependencies.Input("min-num-ratings", "value")],
)
def update_output_tab1(value):
"""
:return: Minimum number of games selected output
text on tab 1 based on slider input.
"""
return "Min Ratings: {}".format(value)
# slider output container second tab
@app.callback(
dash.dependencies.Output("slider-output-container_3", "children"),
[dash.dependencies.Input("min-num-ratings2", "value")],
)
def update_output_tab2(value):
"""
:return: Minimum number of games selected
output text on tab 2 based on slider input.
"""
return "Min Ratings: {}".format(value)
# collapse button for top 10 games fact table
@app.callback(
Output("collapse", "is_open"),
[Input("collapse-button", "n_clicks")],
[State("collapse", "is_open")],
)
def toggle_collapse_tab2(n, is_open):
"""
:return: Open top 10 games fact table on tab 2 if clicked.
"""
if n:
return not is_open
return is_open
# run
if __name__ == "__main__":
app.run_server(debug=False, host="127.0.0.1", port=8055)
|
from plum import dispatch
from netket.utils import struct
from .algorithms import AbstractODEAlgorithm
from .controllers import PIController
##
def default_controller(alg, cache, qoldinit=None):
# if ispredictive(alg): PredictiveController
# if isstandard(alg): IController
beta1, beta2 = _digest_beta1_beta2(alg, cache)
return PIController(beta1, beta2)
def _digest_beta1_beta2(alg, cache):
beta2 = beta2_default(alg)
beta1 = beta1_default(alg, beta2)
# should use rational type...
# return convert(QT, beta1)::QT, convert(QT, beta2)::QT
return beta1, beta2
@dispatch
def beta2_default(alg: AbstractODEAlgorithm):
return 2 / (5 * alg.alg_order) if alg.is_adaptive else 0
@dispatch
def beta1_default(alg: AbstractODEAlgorithm, beta2):
return 7 / (10 * alg.alg_order) if alg.is_adaptive else 0
@dispatch
def gamma_default(alg: AbstractODEAlgorithm, beta2):
return 9 / 10 if alg.is_adaptive else 0
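# Rough sketch (added, not from the source, just the arithmetic above): for a
# hypothetical adaptive algorithm with alg_order == 4, beta2_default gives
# 2 / 20 = 0.1 and beta1_default gives 7 / 40 = 0.175, so default_controller
# returns PIController(0.175, 0.1).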
|
from typing import List

import config
"""
...
Usage
---------
mongo = Mongo('prefix name e.g. myprojectname')
mongo.write_to_*()
"""
class Mongo:
"""
A class used to represent the mongo container
...
Attributes
----------
prefix : str
The prefix applied for all container names
container_name : str
        the name to give the container inside the dockerfile
    Methods
    -------
    get_docker_compose_content()
        Returns the necessary docker-compose content for mongo
    get_env_content()
        Returns the environment file content
"""
def __init__(self, prefix_for_containers: str):
"""
Parameters
----------
        prefix_for_containers : str
            The prefix applied to all container names
"""
self.prefix: str = prefix_for_containers
self.container_name: str = prefix_for_containers + '_mongo'
def get_docker_compose_content(self):
"""
        Returns the necessary docker-compose.yml content for mongo
"""
docker_compose_content: List[str] = [
" mongodb:",
" container_name: {}".format(self.container_name),
" image: mongo",
" ports:",
            '      - "27017:27017"',
" env_file:",
" - ./.docker/env/mongo.env",
" networks:",
" - {}-network".format(self.prefix)
]
return docker_compose_content
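    # Rendered sketch (added): with a hypothetical prefix "myproject" the lines
    # above join into a docker-compose fragment roughly like
    #
    #   mongodb:
    #     container_name: myproject_mongo
    #     image: mongo
    #     ports:
    #       - "27017:27017"
    #     env_file:
    #       - ./.docker/env/mongo.env
    #     networks:
    #       - myproject-network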
def get_env_content(self):
"""
Returns the environment file content
"""
content = [
'MONGODB_ROOT_USERNAME=root',
'MONGODB_ROOT_PASSWD=rootpassword',
'MONGODB_ROOT_ROLE=root',
'MONGODB_USERNAME=user',
'MONGODB_PASSWD=password',
'MONGODB_DBNAME=db',
'MONGODB_ROLE=readWrite'
]
        return content
|
import os
import logging
from datetime import datetime
# This is for the console message printing.
ConsoleLogParm = {
"MsgLevel": logging.INFO,
"MsgFormat": '[%(asctime)s] %(name)s [%(levelname)s] %(message)s',
"DateFormate": '%m/%d %I:%M:%S'
}
# This log file will not be implemented by default.
FileLogParm = {
"MsgLevel": logging.INFO,
"MsgFormat": '[%(asctime)s] %(name)s [%(levelname)s] %(message)s',
"DateFormate": None,
"Filename":r".\RiverwareABM.log"
}
# This log file will be created automatically when executing
# runPyRAMID() in RiverwareWrap.
FileLogParm_runRiverwareABM = {
"MsgLevel": logging.DEBUG,
"MsgFormat": '[%(asctime)s] %(name)s [%(levelname)s] %(message)s',
"DateFormate": None,
"Filename":"PyRAMID.log"
}
MsglevelDict = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR
}
def addGlobalLogFile(Filename=None, Mode='w', MsgLevel=None):
"""Add a global log file.
Args:
Filename (str, optional): Log file name. Defaults to None.
Mode (str, optional): txt mode. Defaults to 'w'.
MsgLevel (str, optional): Message level. Defaults to None.
Returns:
None
"""
if Filename is None:
Filename = FileLogParm["Filename"]
# else:
# Filename = os.path.join(Directory, Filename)
# Create file handler at "root" level.
logger = logging.getLogger("PyRAMID")
logger.setLevel(logging.INFO)
fh = logging.FileHandler(Filename, Mode) # 'w' Overwrite, 'a' append
if MsgLevel is not None:
assert MsgLevel in ['debug', 'info', 'warning', 'error'], \
print("ValueError MsgLevel must be one of these [None, 'debug', "+\
"'info', 'warning', 'error'].")
fh.setLevel(MsglevelDict[MsgLevel])
else:
fh.setLevel(FileLogParm["MsgLevel"])
formatter_fh = logging.Formatter(FileLogParm["MsgFormat"], \
datefmt=FileLogParm["DateFormate"])
fh.setFormatter(formatter_fh)
logger.addHandler(fh)
# print the following message with package hierarchical structures.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#logger.info("Global log file is created at {}"\
# .format(os.path.join(os.getcwd(),Filename[2:])))
return None
def addLocalLogFile(filename, logger, Directory, Mode='w', MsgLevel=None):
"""Add local log file.
    This function adds a local log file for modules within the
    RiverwareABM package.
    It can also be used for files outside of the RiverwareABM
    package if the proper logger is given. To get the proper
    logger, we recommend running setLoggerForCustomizedFile().
Args:
filename (str): Log file name.
logger (object): logger object.
Directory (str): Log file folder directory.
Mode (str, optional): .txt mode. Defaults to 'w'.
MsgLevel (str, optional): Message level. Defaults to None.
Returns:
        [list]: A list containing the logger and the file handler.
"""
Filename = os.path.join(Directory, filename)
fh = logging.FileHandler(Filename, Mode) # w: Overwrite the file, a: append
if MsgLevel is not None:
assert MsgLevel in ['debug', 'info', 'warning', 'error'], \
print("ValueError MsgLevel must be one of these [None, 'debug', "\
+ "'info', 'warning', 'error'].")
fh.setLevel(MsglevelDict[MsgLevel])
else:
fh.setLevel(FileLogParm["MsgLevel"])
formatter_fh = logging.Formatter(FileLogParm["MsgFormat"], \
datefmt=FileLogParm["DateFormate"])
fh.setFormatter(formatter_fh)
logger.addHandler(fh)
return logger, fh
def removeLocalLogFile(logger, fh):
"""Remove file handler from given logger.
Args:
logger (object): logger object.
fh (object): File handler object.
Returns:
object: logger object.
"""
logger.removeHandler(fh)
return logger
def setLoggerForCustomizedFile(AbbrOfThisPyFile, MsgLevel=None):
"""Set logger.
    This function helps to get a hierarchical logger under the
    root PyRAMID logger with the given abbreviation of your file.
Args:
AbbrOfThisPyFile (str): Abbreviation of .py file name.
MsgLevel (str, optional): Message level. 'debug', 'info',
'warning', 'error'. Defaults to None.
Returns:
object: logger
"""
    # Add the hierarchical logger under the root PyRAMID logger.
logger = logging.getLogger("PyRAMID.{}".format(AbbrOfThisPyFile))
if MsgLevel is not None:
assert MsgLevel in ['debug', 'info', 'warning', 'error'], \
print("ValueError MsgLevel must be one of these [None, 'debug', "\
+ "'info', 'warning', 'error'].")
logger.setLevel(MsglevelDict[MsgLevel])
else:
logger.setLevel(ConsoleLogParm["MsgLevel"])
return logger
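# Usage sketch (added, names are hypothetical): a customized script would
# typically do
#
#     logger = setLoggerForCustomizedFile("MyScript")
#     logger, fh = addLocalLogFile("my_run.log", logger, WD)
#     ...
#     removeLocalLogFile(logger, fh)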
def createFolderWithDatetime(WD, Folder):
"""Create folder with datetime under given WD.
Args:
WD (str): Working directory.
Folder (str): Prefix of the folder name.
Returns:
[str]: Created folder's directory.
"""
assert os.path.isdir(WD),\
print("PathError Given directory not exists. {}".format(WD))
dt_string = datetime.now().strftime("%Y%m%d_%H%M%S")
    # This is used to store all calibration data
WD_new = os.path.join(WD, Folder+"_{}".format(dt_string))
os.makedirs(WD_new)
print("Create {}".format(WD_new))
    return WD_new
|
b='Hong Pi Geng Nie Kong Zhi Xiao She Yu Jiang Voi Chong Qi Chen Sang Suo Qian Hui Shan E Ci Qiu Ke Ham Weng Zi Ji Mai Da Cuo Manh Chom Lou Kang Kuo Di Qie Mo Guo Hong Chao Hei Gei Gun Zaat Zoeng Cao Zhe Ke Keoi Gun Xu Peng Jue Gan Si Sui Que Wu Yan Peng Xiao Pan Zyun Sin Taan La Gun Doi Vo Quanh Vo San Lo Vo Beng Zhen Ji Jin Lian Ken Zhou Zao Cuoi Le Qi Bing Yin Pin Oe Tung Me Sou Lu Di Du Liao Zhuo Chang Gach Re Ghe Quanh Soi Chen Ta Jin Que Dao Cin Chen Rang Po Lai Zhong Xie Nen Gach Jiang Qu Lei Ca Que Gach Xiang Lei To Lan Lom La'
|
from astropy.io import fits
path_data = ''  # set this to the directory containing the Calib*NGC* frames
import glob
imlist = sorted(glob.glob(f'{path_data}/Calib*NGC*0.fits'))
for inim in imlist:
with fits.open(inim) as hdul:
        hdul.info()
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
class RACI(LoginRequiredMixin, TemplateView):
template_name = "RACI/RACI.html"
login_url = '/'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
|
import atexit
import os
import random
import string
import subprocess
import sys
import pytest
class SubprocessIOC:
def __init__(self, ioc_py):
self.pv_prefix = "".join(
random.choice(string.ascii_uppercase) for _ in range(12)
)
sim_ioc = os.path.join(os.path.dirname(__file__), ioc_py)
cmd = [sys.executable, sim_ioc, self.pv_prefix]
self.proc = subprocess.Popen(
cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def kill(self):
if self.proc.returncode is None:
# still running, kill it and print the output
self.proc.kill()
out, err = self.proc.communicate()
print(out.decode())
print(err.decode())
@pytest.fixture
def cothread_ioc():
ioc = SubprocessIOC("sim_cothread_ioc.py")
yield ioc
ioc.kill()
def aioca_cleanup():
from aioca import purge_channel_caches, _catools
# Unregister the aioca atexit handler as it conflicts with the one installed
# by cothread. If we don't do this we get a seg fault. This is not a problem
# in production as we won't mix aioca and cothread, but we do mix them in
# the tests so need to do this.
atexit.unregister(_catools._catools_atexit)
# purge the channels before the event loop goes
purge_channel_caches()
@pytest.fixture
def asyncio_ioc():
ioc = SubprocessIOC("sim_asyncio_ioc.py")
yield ioc
ioc.kill()
aioca_cleanup()
@pytest.fixture
def asyncio_ioc_override():
ioc = SubprocessIOC("sim_asyncio_ioc_override.py")
yield ioc
ioc.kill()
aioca_cleanup()
|
#!/usr/bin/env python
import re
import os
import os.path
import struct
from numpy.fft import rfft
file_re = re.compile(r"([LR])(\-?\d+).*e(\d+)a.dat", re.IGNORECASE)
kemar = {}
for dirpath, _dn, files in os.walk("full"):
for fname in files:
#print dirpath, fname
m = file_re.match(fname)
if not m:
continue
fname = os.path.join(dirpath, fname)
mic = {'L': 0, 'R': 1}[m.group(1).upper()]
elev = int(m.group(2))
az = int(m.group(3))
with open(fname, "rb") as f:
data = f.read()
#print "Read %d" %len(data)
data = struct.unpack(">512h", data)
sdata = []
for i in xrange(0, len(data)):
sdata.append(0)
for i in xrange(0, len(data)):
sdata.append(data[i] / 32768.0)
data = rfft(sdata)
if elev not in kemar:
kemar[elev] = {az: {mic: data}}
continue
if az not in kemar[elev]:
kemar[elev][az] = {mic: data}
continue
kemar[elev][az][mic] = data
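# kemar maps elevation -> azimuth -> mic (0 = L, 1 = R) -> rfft spectrum of the
# 1024-sample zero-padded response (513 complex bins, matching KemarPoints below).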
header = """#ifndef CLUNK_KEMAR_H
#define CLUNK_KEMAR_H
/*
This data is Copyright 1994 by the MIT Media Laboratory.
It is provided free with no restrictions on use,
provided the authors are cited when the data is used in any research or commercial application.
Bill Gardner [email protected] and Keith Martin [email protected]
*/
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* DO NOT EDIT THIS HEADER, IT'S AUTOGENERATED */
"""
print "found %d elevation angles" %len(kemar)
eangles = sorted(kemar.iterkeys())
header += "static const int KemarMinElevation = %d;\n" % min(eangles)
header += "static const int KemarMaxElevation = %d;\n" % max(eangles)
header += "static const int KemarElevationCount = %d;\n" % len(eangles)
header += "static const int KemarElevationStep = %d;\n" % ((max(eangles) - min(eangles)) / (len(eangles) - 1))
header += "static const unsigned KemarPoints = 513;\n"
header += """
struct kemar_elevation_data {
int elevation;
unsigned samples;
const float (*data)[2][513][2];
};
"""
header += "extern struct kemar_elevation_data kemar_data[14];\n"
header += """
#ifdef __cplusplus
}
#endif
#endif
"""
with open("kemar.h", "wb") as f:
f.write(header)
source = """#include "kemar.h"
"""
epilogue = """
struct kemar_elevation_data kemar_data[%d] =
{
""" %len(eangles)
for elev, az_dict in sorted(kemar.iteritems()):
print "elevation %d, items: %d" %(elev, len(az_dict))
array_name = "elev_%s" %(elev if elev >= 0 else ("m%d" % -elev))
source += """static const float %s[][2][513][2] =
{
""" %array_name
for az, mic_n_data in sorted(az_dict.iteritems()):
data0 = ""
data1 = ""
for a in mic_n_data[0]:
data0 += "{%g, %g}, " %(float(a.real), float(a.imag))
for a in mic_n_data[1]:
data1 += "{%g, %g}, " %(float(a.real), float(a.imag))
source += """ /* azimuth = %d */
{
{%s},
{%s}
},
""" %(az, data0, data1)
source += "};\n"
epilogue += "\t{%4d, %4d, %10s },\n" %(elev, len(az_dict), array_name)
epilogue += """};
"""
with open("kemar.c", "wb") as f:
f.write(source + epilogue)
|
from pygmy.model import *
from pygmy.config import config
from pygmy.database.sqlite import SqliteDatabase
from pygmy.database.postgresql import PostgreSQLDatabase
from pygmy.database.mysql import MySQLDatabase
from pygmy.database.base import Model
class DatabaseFactory:
@staticmethod
def create():
"""Get db class from config.db.engine"""
        # TODO: make a util mapping
if config.database['engine'] == 'sqlite3':
database = SqliteDatabase()
elif config.database['engine'] == 'postgresql':
database = PostgreSQLDatabase()
elif config.database['engine'] == 'mysql':
database = MySQLDatabase()
else:
raise Exception(
"Unsupported DB type. Supported types are "
"postgresql/sqlite3 and mysql")
database.initialize(config.debug)
        # Create all tables if they don't already exist.
Model.metadata.create_all(database.engine)
# TODO DB: Run migrations
return database
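# Illustrative usage: call DatabaseFactory.create() once at application start-up;
# it reads config.database['engine'], initializes the matching backend and
# creates any missing tables before returning the database object.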
|
# RemovedInDjango50Warning.
from django.core.serializers.base import (
PickleSerializer as BasePickleSerializer,
)
from django.core.signing import JSONSerializer as BaseJSONSerializer
JSONSerializer = BaseJSONSerializer
PickleSerializer = BasePickleSerializer
|
"""
The solver for training
"""
import tensorflow as tf
import numpy as np
import logging
from time import time
from os import path, makedirs
from model import *
from train400_data import *
from ops import *
from utils import *
flags = tf.app.flags
conf = flags.FLAGS
class Solver(object):
def __init__(self):
# path
self.data_dir = conf.data_dir
self.models_dir = conf.models_dir
self.logFilename = conf.log_name
# make dirs
if not path.exists(self.models_dir):
makedirs(self.models_dir)
# soft constraint for total epochs
self.num_epoch = conf.num_epoch + 1
# hyper parameters
self.batch_size = conf.batch_size
self.weight_decay = conf.weight_decay
# learning rate
self.lr = tf.placeholder(tf.float32)
self.base_lr = conf.base_lr
self.power = conf.power
self.end_lr = conf.end_lr
# resuming and finetune
self.resume = conf.resume
self.finetune = conf.finetune
        if conf.iters is None:
            if self.resume or self.finetune:
                raise ValueError('conf.iters must be set when resuming or finetuning')
# get datas and labels for training
dataset = Train400_Data(filename=path.join(self.data_dir, 'train400.tfrecord'),
num_epoch=self.num_epoch, sigma=50, batch_size=self.batch_size, scope='train400')
with tf.device('/gpu:0'):
# build the inference graph
net = Net(data=dataset.datas, label=dataset.labels, wl=self.weight_decay)
net.build_net()
# create an optimizer that performs gradient descent
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(net.total_loss)
self.total_loss = net.total_loss
def init_logging(self):
logging.basicConfig(
level = logging.DEBUG,
#format = 'LINE %(lineno)-4d %(levelname)-8s %(message)s',
format = '%(message)s',
datefmt = '%m-%d %H:%M',
filename = self.logFilename,
filemode = 'w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# set a format which is simpler for console use
#formatter = logging.Formatter('LINE %(lineno)-4d : %(levelname)-8s %(message)s');
formatter = logging.Formatter('%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def train(self, disp_freq, save_freq, summary_freq):
# initialize logging
self.init_logging()
# operations for initialization
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
summary_op = tf.summary.merge_all()
saver = tf.train.Saver(max_to_keep=int(10e3))
# create a session for running operations in the graph
config = tf.ConfigProto(allow_soft_placement=True)
# config.gpu_options.allow_growth = True
config.gpu_options.allow_growth = False
#config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 # enable XLA
sess = tf.Session(config=config)
# initialize the variables (like the epoch counter)
sess.run(init_op)
# restore trained weights for resuming
if self.resume or self.finetune:
saver.restore(sess, path.join(conf.finetune_models_dir, 'model.ckpt-' + str(conf.iters)))
summary_writer = tf.summary.FileWriter(self.models_dir, sess.graph)
# start input enqueue threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# global iterations for resuming
if self.resume:
iters = conf.iters
# for training and finetune
else:
iters = 0
# accumulational variables
sum_time = 0
sum_loss = 0
# trace options and metadata
checkpoint_path = path.join(self.models_dir, 'model.ckpt')
# run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# run_metadata = tf.RunMetadata()
# save iteration 0 and metagraph
saver.save(sess, checkpoint_path, global_step=iters)
# generate summary
# summary_str = sess.run(summary_op, options=run_options, run_metadata=run_metadata)
# summary_writer.add_run_metadata(run_metadata, 'step%03d' % iters)
# summary_writer.add_summary(summary_str, iters)
# decay policy of learning rate
decay_fraction_of_epochs = 1.0
self.decay_steps = (num_examples_per_epoch_for_train * self.num_epoch * decay_fraction_of_epochs) // self.batch_size
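        # With decay_fraction_of_epochs = 1.0 the decay spans the whole run, so the
        # loop below lowers lr from base_lr to end_lr via truncated polynomial decay.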
total_loss = sess.run(self.total_loss, feed_dict={self.lr: self.base_lr})
logging.info('step %d, loss = %.8f' % (iters, total_loss))
try:
# training loop
while not coord.should_stop():
# calculate current learning rate (truncated polynomial decay)
if iters <= self.decay_steps:
current_lr = (self.base_lr - self.end_lr) * pow((1 - float(iters) / self.decay_steps), (self.power)) + self.end_lr
else:
current_lr = self.end_lr
# run training steps or whatever
t1 = time()
_, total_loss = sess.run([self.train_op, self.total_loss], feed_dict={self.lr: current_lr})
t2 = time()
iters += 1
# accumulate
sum_time += t2 - t1
sum_loss += total_loss
# display
if iters % disp_freq == 0:
logging.info('step %d, loss = %.4f (lr: %.8f, time: %.2fs)'
% (iters, sum_loss / disp_freq, current_lr, sum_time))
sum_time = 0
sum_loss = 0
# save checkpoint
if iters % save_freq == 0:
saver.save(sess, checkpoint_path, global_step=iters, write_meta_graph=False)
                # write summary (disabled: run_options/run_metadata are commented out above)
                # if iters % summary_freq == 0:
                #     summary_str = sess.run(summary_op, options=run_options, run_metadata=run_metadata)
                #     summary_writer.add_run_metadata(run_metadata, 'step%03d' % iters)
                #     summary_writer.add_summary(summary_str, iters)
except tf.errors.OutOfRangeError:
logging.info('Done training -- epoch limit reached')
finally:
# when done, ask the threads to stop
coord.request_stop()
# wait for threads to finish
coord.join(threads)
sess.close()
|
from telegram_bot_sdk.telegram_objects.sticker import Sticker
class StickerSet:
"""This class represents a sticker set
:param name: Sticker set name
:type name: str
:param title: Sticker set title
:type title: str
:param is_animated: True, if the sticker set contains animated stickers
:type is_animated: bool
:param contains_masks: True, if the sticker set contains masks
:type contains_masks: bool
:param stickers: List of all set stickers
:type stickers: list of :ref:`object_stickers`
"""
def __init__(self, *, name, title, is_animated, contains_masks, stickers):
self.name = name
self.title = title
self.is_animated = is_animated
self.contains_masks = contains_masks
self.stickers = [Sticker(**x) for x in stickers] if stickers else None
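# Illustrative construction from an API payload (all field values are made up):
#   sticker_set = StickerSet(name="cats_by_bot", title="Cats", is_animated=False,
#                            contains_masks=False, stickers=[])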
|
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""Tests for the ``PdosWorkChain.get_builder_from_protocol`` method."""
from aiida.engine import ProcessBuilder
from aiida.plugins import WorkflowFactory
import pytest
from aiida_quantumespresso.common.types import ElectronicType, SpinType
PdosWorkChain = WorkflowFactory('quantumespresso.pdos')
@pytest.fixture
def get_pdos_generator_inputs(fixture_code, generate_structure):
"""Generate a set of default inputs for the ``PdosWorkChain.get_builder_from_protocol()`` method."""
return {
'pw_code': fixture_code('quantumespresso.pw'),
'dos_code': fixture_code('quantumespresso.dos'),
'projwfc_code': fixture_code('quantumespresso.projwfc'),
'structure': generate_structure('silicon')
}
def test_get_available_protocols():
"""Test ``PdosWorkChain.get_available_protocols``."""
protocols = PdosWorkChain.get_available_protocols()
assert sorted(protocols.keys()) == ['fast', 'moderate', 'precise']
assert all('description' in protocol for protocol in protocols.values())
def test_get_default_protocol():
"""Test ``PdosWorkChain.get_default_protocol``."""
assert PdosWorkChain.get_default_protocol() == 'moderate'
def test_default(get_pdos_generator_inputs, data_regression, serialize_builder):
"""Test ``PdosWorkChain.get_builder_from_protocol`` for the default protocol."""
inputs = get_pdos_generator_inputs
builder = PdosWorkChain.get_builder_from_protocol(**inputs)
assert isinstance(builder, ProcessBuilder)
data_regression.check(serialize_builder(builder))
def test_electronic_type(get_pdos_generator_inputs):
"""Test ``PdosWorkChain.get_builder_from_protocol`` with ``electronic_type`` keyword."""
with pytest.raises(NotImplementedError):
builder = PdosWorkChain.get_builder_from_protocol(
**get_pdos_generator_inputs, electronic_type=ElectronicType.AUTOMATIC
)
builder = PdosWorkChain.get_builder_from_protocol(
**get_pdos_generator_inputs, electronic_type=ElectronicType.INSULATOR
)
for namespace, occupations in zip((builder.scf, builder.nscf), ('fixed', 'tetrahedra')):
parameters = namespace['pw']['parameters'].get_dict()
assert parameters['SYSTEM']['occupations'] == occupations
assert 'degauss' not in parameters['SYSTEM']
assert 'smearing' not in parameters['SYSTEM']
def test_spin_type(get_pdos_generator_inputs):
"""Test ``PdosWorkChain.get_builder_from_protocol`` with ``spin_type`` keyword."""
with pytest.raises(NotImplementedError):
for spin_type in [SpinType.NON_COLLINEAR, SpinType.SPIN_ORBIT]:
builder = PdosWorkChain.get_builder_from_protocol(**get_pdos_generator_inputs, spin_type=spin_type)
builder = PdosWorkChain.get_builder_from_protocol(**get_pdos_generator_inputs, spin_type=SpinType.COLLINEAR)
for namespace in [builder.scf, builder.nscf]:
parameters = namespace['pw']['parameters'].get_dict()
assert parameters['SYSTEM']['nspin'] == 2
assert parameters['SYSTEM']['starting_magnetization'] == {'Si': 0.1}
def test_nscf_smearing_raises(get_pdos_generator_inputs):
"""Test ``PdosWorkChain.get_builder_from_protocol`` fails when NSCF uses smearing."""
overrides = {'nscf': {'pw': {'parameters': {'SYSTEM': {'occupations': 'smearing'}}}}}
with pytest.raises(ValueError, match=r'`SYSTEM.occupations` in `nscf.pw.parameters`'):
PdosWorkChain.get_builder_from_protocol(**get_pdos_generator_inputs, overrides=overrides)
|
# 97% accuracy after 2 epochs
from keras.datasets import mnist
import tensorflow.keras as keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Conv2D, MaxPooling2D
from keras.utils import normalize
from keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
model_Name = "Mnist_Conv_batchNormalise"
tensorboard_name = 'conv(32-64)_batchNormalise/'
#Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#Normalise the data
x_train = normalize(x_train, axis=-1)
x_test = normalize(x_test, axis=-1)
#Reshape Data
#2-for conv
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train = x_train.reshape(x_train.shape[0], 28,28, 1)
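# Conv2D expects a channel axis, hence the (samples, 28, 28, 1) shape with channels_last.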
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
input_shape=(28, 28, 1), data_format="channels_last"))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
#compile model
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
#Tensorboard for visualise
tensorboard = TensorBoard(log_dir='Mnist_log/' + tensorboard_name,
histogram_freq=30)
#Feed the data
model.fit(x_train, y_train, epochs=2, batch_size=128,
validation_data=(x_test, y_test), callbacks=[tensorboard])
#Save Model
model.save(model_Name + '.model')
#Delete existing model
del model
#load saved model
save_model = keras.models.load_model(model_Name + '.model')
|
#!/usr/bin/python
"""
Build a simple network from scratch, using mininet primitives.
This is more complicated than using the higher-level classes,
but it exposes the configuration details and allows customization.
For most tasks, the higher-level API will be preferable.
"""
import csv
import sys
import time
from mininet.net import Mininet
from mininet.node import Node
from mininet.link import Link
from mininet.log import setLogLevel, info
from mininet.util import quietRun
import pingparser
CTLR_IP = '2017:db8::ffaa'
CTLR_PRT = '6653'
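# IPv6 address/port of the remote OpenFlow controller; used below as
# 'ovs-vsctl set-controller dp0 tcp:[<ip>]:<port>'.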
# 0: step-wise testing, 1: continuous testing
mode = 1
def stop_net(controller, cname, switch):
info("*** Stopping network\n")
controller.cmd('kill %' + cname)
switch.cmd('ovs-vsctl del-br dp0')
switch.deleteIntfs()
info('Net was removed\n')
def scratchNet(cname='controller', cargs='-v ptcp:'):
"Create network from scratch using Open vSwitch."
info("*** Creating nodes\n")
controller = Node('c0', inNamespace=False)
switch = Node('s0', inNamespace=False)
h0 = Node('h0')
h1 = Node('h1')
info("*** Creating links\n")
Link(h0, switch)
Link(h1, switch)
info("*** Configuring hosts\n")
h0.setIP('192.168.123.1/24')
h1.setIP('192.168.123.2/24')
info(str(h0) + '\n')
info(str(h1) + '\n')
info("*** Starting network using Open vSwitch\n")
controller.cmd(cname + ' ' + cargs + '&')
switch.cmd('ovs-vsctl del-br dp0')
switch.cmd('ovs-vsctl add-br dp0')
for intf in switch.intfs.values():
print switch.cmd('ovs-vsctl add-port dp0 %s' % intf)
# Note: controller and switch are in root namespace, and we
# can connect via loopback interface
s_cmd = 'ovs-vsctl set-controller dp0 tcp:[{}]:{}'.format(CTLR_IP, CTLR_PRT)
print s_cmd
switch.cmd(s_cmd)
ping_results = ['received,host,jitter,packet_loss,avgping,minping,time,sent,maxping\n']
try:
h0.cmd('echo "" > pings.txt')
if mode == 0:
step_wise_testing(h0, h1, ping_results)
else:
continuous_testing(h0, h1, ping_results)
except KeyboardInterrupt:
print "Warning: Caught KeyboardInterrupt, stopping network"
tm_local = time.localtime()
dt = time.gmtime()
file_name = 'pings_{}_{}_{}-{}_{}_{}.csv'.format(dt.tm_year, dt.tm_mon, dt.tm_mday, tm_local.tm_hour, tm_local.tm_min, tm_local.tm_sec)
f = open(file_name, 'w+')
for item in ping_results:
f.write(item)
stop_net(controller, cname, switch)
def step_wise_testing(h0, h1, ping_results):
while True:
if 'is_connected' not in quietRun('ovs-vsctl show'):
wait_for_controller_connection()
print "Press ENTER to execute Test\n"
line = sys.stdin.readline()
if line:
info("Key Input Accepted\n")
ping_test(h0, h1, ping_results)
def continuous_testing(h0, h1, ping_results):
while True:
if 'is_connected' not in quietRun('ovs-vsctl show'):
wait_for_controller_connection()
ping_test(h0, h1, ping_results)
time.sleep(1)
def ping_test(h0, h1, ping_results):
info("*** Running test\n")
ping_res = h0.cmdPrint('ping -c1 ' + h1.IP())
ping_res = pingparser.parse(ping_res)
tm_local = time.localtime()
ping_res['time'] = '{}:{}:{}'.format(tm_local.tm_hour, tm_local.tm_min, tm_local.tm_sec)
val_string = ','.join(ping_res.itervalues())
ping_results.append(val_string + "\n")
print ping_res
info("*** Sleep\n")
def wait_for_controller_connection():
info('*** Waiting for switch to connect to controller')
while 'is_connected' not in quietRun('ovs-vsctl show'):
time.sleep(1)
info('.')
info('Connected \n')
if __name__ == '__main__':
setLogLevel('info')
info('*** Scratch network demo (kernel datapath)\n')
Mininet.init()
scratchNet()
|
import datetime
from tkinter import font
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mpl_dates
from mplfinance.original_flavor import candlestick_ohlc
import pandas as pd
def candlestick_plot(data):
data_clean = {"Date": [], "Open": [], "High": [], "Low": [], "Close": []}
for i in data:
data_clean["Date"].append(datetime.datetime.fromtimestamp(i[0]/1000))
data_clean["Open"].append(float(i[1]))
data_clean["High"].append(float(i[2]))
data_clean["Low"].append(float(i[3]))
data_clean["Close"].append(float(i[4]))
data_clean = pd.DataFrame(data_clean)
data_clean["Date"] = pd.to_datetime(data_clean["Date"])
data_clean["Date"] = data_clean["Date"].apply(mpl_dates.date2num)
fig, ax = plt.subplots()
candlestick_ohlc(ax, data_clean.values, width = 0.2, colorup = "blue", colordown="red", alpha=0.8)
ax.grid()
ax.set_xlabel("Date")
ax.set_ylabel("Price")
fig.suptitle("Candlestick chart for OHCLV of the given symbol")
ax.xaxis.set_major_formatter(mpl_dates.DateFormatter("%d-%m-%y"))
fig.autofmt_xdate()
fig.tight_layout()
plt.show() |
'''
Description:
version:
Author: zpliu
Date: 2021-07-01 11:18:14
LastEditors: zpliu
LastEditTime: 2021-07-01 11:18:16
@param:
'''
|
from ascntr.cli import cli
def test_simple_call(cli_runner, oracle_dsn, target_dsn):
pass
|
import torch
import src
MAX_TRIES = 100
EPS = 1e-8
def lipschitz_max_grad(net, X_observe, X_target, Y_target, k=1):
'''
Description:
Penalizes the model if it does not have a K-Lipschitz
maximum gradient-norm for any interpolant between samples
from the target and observed distributions.
        Note that for multidimensional inputs, the K-Lipschitz property is:
            ||f(x1) - f(x2)|| <= K*||x1 - x2||
Input:
net - torch.nn.Module, the training model.
X_observe - torch Tensor of size (N, D), samples from the
observed distribution.
X_target - torch Tensor of size (M, D), samples from the
target distribution.
Y_target - torch Tensor of size (M, T), output for X_target.
k - float, target Lipschitz constant.
Output:
        loss - scalar (0-dim) torch Tensor, the loss value from which
            backpropagation should begin.
'''
device = X_observe.device
X_observe = X_observe.cpu()
with torch.no_grad():
X_choice, Y_choice = _rand_diff_choice(X_observe, X_target, Y_target)
X_interp = _rand_blend(X_observe, X_choice)
Y_interp = net(X_interp.to(device))
grad = torch.autograd.grad(
[Y_interp.mean()],
net.parameters(),
create_graph=True,
only_inputs=True
)[0]
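    # Two-sided gradient penalty: squared deviation of the gradient norm from the
    # target Lipschitz constant k (similar in spirit to the WGAN-GP penalty).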
return ((k-grad.norm(p=2))**2).mean()
# === PRIVATE ===
def _rand_blend(X1, X2):
assert X1.size() == X2.size()
alpha = torch.rand_like(X1) * (1-EPS)
return alpha*X1 + (1-alpha)*X2
def _rand_diff_choice(X_observe, X_target, Y_target):
N = X_target.size(0)
M = X_observe.size(0)
assert N >= M
I = src.tensortools.rand_indices(N)[:M]
X_choice = X_target[I].clone()
Y_choice = Y_target[I].clone()
too_close = _check_too_close(X_observe, X_choice)
for i in range(MAX_TRIES):
m = too_close.long().sum().item()
if m == 0:
break
I = src.tensortools.rand_indices(N)[:m]
X_choice[too_close] = X_target[I].clone()
Y_choice[too_close] = Y_target[I].clone()
too_close = _check_too_close(X_observe, X_choice)
    assert not too_close.any(), "Could not find sufficiently distant matches after MAX_TRIES resamples!"
return X_choice, Y_choice
def _check_too_close(X1, X2):
N = X1.size(0)
return (X1.view(N, -1) - X2.view(N, -1)).norm(p=2, dim=1) < EPS
|
from django.db import models
class Instances(models.Model):
    channelId = models.UUIDField()
channelName = models.TextField()
def __str__(self):
return "%s" % (self.channelId) |
import numpy as np
class NotInVocabularyError(Exception):
"""Auxillary class."""
pass
class GloVeDataIterator():
"""
Args:
get_text_iterator: Callable that returns an iterator that yields
text, as a list of words (strings).
window_size: Positive integer.
min_word_count: Non-negative integer.
"""
def __init__(self,
get_text_iterator,
window_size,
min_word_count=6):
self.get_text_iterator = get_text_iterator
self.window_size = window_size
self.min_word_count = min_word_count
        # Get vocabulary first, and then the co-occurrence counts
self._vocab = self._get_vocabulary()
self._X = self._get_cooccurance_counts()
self.vocab_size = len(self._vocab)
def _get_vocabulary(self):
"""Returns a dict mapping from word (string) to its ID (as integer)
in vocabulary."""
word_counts = {}
for text in self.get_text_iterator():
for word in text:
try:
word_counts[word] += 1
except KeyError:
word_counts[word] = 1
word_counts = [(w, c) for w, c in word_counts.items()
if c >= self.min_word_count]
word_counts = sorted(word_counts, key=lambda _: _[1], reverse=True)
vocab = {w: i for i, (w, c) in enumerate(word_counts)}
return vocab
def _get_cooccurance_counts(self):
r"""Returns a dict mapping from word-word ID pair :math:`(i, j)` to
its co-occurance counts :math:`X_ij`."""
# Stats the word-word co-occurance counts
X = {} # initialize.
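        # Slide a window over each text; every incoming word is paired with each
        # earlier word still in the window (the last 2*window_size words at most).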
def count_in_text(text):
window = [] # initialize.
for word in text:
window.append(word)
if len(window) > 2 * self.window_size + 1:
window = window[1:]
for other_word in window[:-1]:
try:
ij = (self.get_word_id(word),
self.get_word_id(other_word))
try:
X[ij] += 1
except KeyError:
X[ij] = 1
except NotInVocabularyError:
pass
for text in self.get_text_iterator():
count_in_text(text)
return X
def get_word_id(self, word):
"""Returns the ID of the `word` in the vocabulary.
Args:
word: String.
Returns:
Non-negative integer in the range [0, self.vocab_size).
Raise:
NotInVocabularyError.
"""
try:
return self._vocab[word]
except KeyError:
raise NotInVocabularyError
def X(self, i, j):
"""Returns the word-word co-occurance counts of the i-th and the j-th
words in the vocabulary.
Args:
i: Non-negative integer in the range [0, self.vocab_size).
j: Non-negative integer in the range [0, self.vocab_size).
Returns:
Non-negative integer.
"""
assert i < self.vocab_size and j < self.vocab_size
try:
return self._X[(i, j)]
except KeyError:
return 0
def __iter__(self):
"""
Yields:
Numpy array with shape `[None, 3]` and int-dtype, where the first
column is for `i`, the second for `j` and the final one for `X_ij`.
"""
while True:
            i, j = np.random.randint(0, self.vocab_size, size=[2])  # high is exclusive
yield np.array([i, j, self.X(i, j)])
if __name__ == '__main__':
"""Test"""
import os
from gensim.test.utils import datapath
from gensim.corpora import WikiCorpus
# Get file path
script_path = os.path.abspath(__file__)
    data_dir = os.path.join(os.path.dirname(script_path), '../../dat')
file_name = 'enwiki-latest-pages-articles1.xml-p10p30302.bz2'
file_path = os.path.join(data_dir, file_name)
wiki_corpus = WikiCorpus(datapath(file_path))
def get_text_iterator(corpus=wiki_corpus):
return corpus.get_texts()
glove_data_iter = GloVeDataIterator(get_text_iterator, window_size=2)
# Display results
print('{} words in vocabulary.'.format(glove_data_iter.vocab_size))
non_vanishing_counts = len(
[v for k, v in glove_data_iter._X.items() if v > 0])
    print('{} non-vanishing counts in the word-word co-occurrence counts.'
.format(non_vanishing_counts))
|
"""
rasters.py includes code to work with raster datasets,
particularly to sample a raster using a PrmsDiscretization
object and then use the result as input data
"""
import numpy as np
class Rasters(object):
"""
Raster object which allows the user
to snap a raster to a grid.
Parameters
----------
    raster : rasterio.DatasetReader object
"""
def __init__(self, raster):
self.raster = raster
self._band_data = None
self._xpoints = None
self._ypoints = None
self._xypoints = None
@property
def extent(self):
"""
Returns
-------
(xmin, xmax, ymin, ymax)
"""
return (
self.raster.bounds.left,
self.raster.bounds.right,
self.raster.bounds.bottom,
self.raster.bounds.top,
)
@property
def xpoints(self):
"""
Returns
-------
Cell centered x points for raster values
"""
if self._band_data is not None:
if self._xpoints is None:
xmin, xmax = self.extent[0:2]
ynum, xnum = self._band_data.shape
t = np.linspace(xmin, xmax, xnum)
self._xpoints = np.tile(t, (ynum, 1))
return self._xpoints
@property
def ypoints(self):
"""
Returns
-------
Cell centered y points for raster values
"""
if self._band_data is not None:
if self._ypoints is None:
ymin, ymax = self.extent[2:4]
ynum, xnum = self._band_data.shape
t = np.linspace(ymin, ymax, ynum)
self._ypoints = np.tile(t, (xnum, 1)).T
return self._ypoints
@property
def xypoints(self):
"""
Returns
-------
np.ndarray cell centered x, y points for raster values
"""
if self._xypoints is None:
if self.xpoints is None or self.ypoints is None:
return
self._xypoints = np.array([self.xpoints, self.ypoints])
return self._xypoints
@property
def band_array(self):
"""
Returns
-------
np.ndarray of the raster band
"""
if self._band_data is not None:
nodata = self.raster.nodata
self._band_data[self._band_data == nodata] = np.nan
return self._band_data
def sample_discretization(self, prms_discretizaiton):
"""
Method to sample the raster using the prms_discretization
object cell centers
Parameters
----------
prms_discretizaiton : PrmsDiscretization object
Returns
-------
np.ndarray
"""
xy = list(
zip(
prms_discretizaiton.x_hru_centers,
prms_discretizaiton.y_hru_centers,
)
)
temp = np.array(list(self.raster.sample(xy))).flatten()
temp[temp == self.raster.nodata] = np.nan
return temp
def set_raster_band(self, band):
"""
        Method to read the given raster band (via rasterio)
        for visualization
Parameters
----------
band : int
raster band number
"""
self._band_data = self.raster.read(band)
@staticmethod
def load(name):
"""
Parameters
----------
name : raster file name
Returns
-------
Rasters object
"""
import rasterio
raster = rasterio.open(name)
return Rasters(raster)
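# Illustrative usage (the file name is made up):
#   rasters = Rasters.load("dem.tif")
#   rasters.set_raster_band(1)
#   values = rasters.sample_discretization(prms_discretization)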
|
expected_output = {
"services-accounting-information": {
"flow-aggregate-template-detail": {
"flow-aggregate-template-detail-ipv4": {
"detail-entry": [{
"byte-count": "184",
"input-snmp-interface-index": "1014",
"mpls-label-1": "299792",
"mpls-label-2": "16",
"mpls-label-3": "0",
"output-snmp-interface-index": "624",
"packet-count": "2"
}]
}
}
}
} |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Pennant(MakefilePackage):
"""PENNANT is an unstructured mesh physics mini-app designed
for advanced architecture research. It contains mesh data
structures and a few physics algorithms adapted
from the LANL rad-hydro code FLAG, and gives a sample of
the typical memory access patterns of FLAG.
"""
homepage = "https://github.com/lanl/PENNANT"
url = "https://github.com/lanl/PENNANT/archive/pennant_v0.9.tar.gz"
tags = ['proxy-app']
version('0.9', '4f21ba3836b2721436277308c2e33f45')
version('0.8', 'a1afff4914fef8140c3024a02d7c993c')
version('0.7', 'd642a030d5388f65f799504803794a4e')
version('0.6', '8ab2d4b47ec9870643bfe6f262cd47a4')
version('0.5', '534547878c698b9926e2886c74e10831')
version('0.4', '0f67d8da0a92bd42d92a4823d3e4dbe1')
variant('mpi', default=True, description='Build with MPI support')
variant('openmp', default=True, description='Build with OpenMP support')
variant('debug', default=False, description='Enable debug')
depends_on('mpi', when='+mpi')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
debug = '-g'
opt = '-O3'
if self.compiler.name == 'intel':
opt += ' -fast -fno-alias'
if self.compiler.name == 'pgi':
opt += ' -fastsse'
makefile.filter(
'CXXFLAGS_DEBUG .*',
'CXXFLAGS_DEBUG := {0}'.format(debug))
makefile.filter(
'CXXFLAGS_OPT .*',
'CXXFLAGS_OPT := {0}'.format(opt))
makefile.filter(
'CXXFLAGS_OPENMP .*',
'CXXFLAGS_OPENMP := {0}'.format(self.compiler.openmp_flag))
if '+mpi' in spec:
makefile.filter(
'CXX .*',
'CXX := {0}'.format(spec['mpi'].mpicxx))
else:
makefile.filter('-DUSE_MPI', '#')
makefile.filter('CXX .*', 'CXX := c++')
if '+openmp' not in spec:
makefile.filter('.*CXXFLAGS_OPENMP.*', '#')
if '+debug' in spec:
makefile.filter(
'.*(CXXFLAGS_OPT).*',
'CXXFLAGS := $(CXXFLAGS_DEBUG)')
def install(self, spec, prefix):
def install_dir(dirname):
install_tree(dirname, join_path(prefix, dirname))
mkdirp(prefix.bin)
install('build/pennant', prefix.bin)
install_dir('doc')
install_dir('test')
install('LICENSE', prefix)
install('README', prefix)
|
import os
from django.conf import settings
from django.core.management import BaseCommand
from django_test_tools.generators.crud_generator import GenericTemplateWriter
from django_test_tools.generators.model_generator import FactoryBoyGenerator
from ...app_manager import DjangoAppManager
PRINT_IMPORTS = """
import string
from random import randint
from pytz import timezone
from django.conf import settings
from factory import Iterator
from factory import LazyAttribute
from factory import SubFactory
from factory import lazy_attribute
from factory.django import DjangoModelFactory, FileField
from factory.fuzzy import FuzzyText, FuzzyInteger
from faker import Factory as FakerFactory
faker = FakerFactory.create()
"""
PRINT_FACTORY_CLASS = """
class {0}Factory(DjangoModelFactory):
class Meta:
model = {0}
"""
PRINT_CHARFIELD = """ {} = LazyAttribute(lambda x: faker.text(max_nb_chars={}))"""
PRINT_CHARFIELD_NUM = """ {} = LazyAttribute(lambda x: FuzzyText(length={}, chars=string.digits).fuzz())"""
PRINT_CHARFIELD_LETTERS = """ {} = LazyAttribute(lambda x: FuzzyText(length={}, chars=string.ascii_letters).fuzz())"""
PRINT_CHARFIELD_CHOICES = """ {} = Iterator({}.{}, getter=lambda x: x[0])"""
PRINT_DATETIMEFIELD = """ {} = LazyAttribute(lambda x: faker.date_time_between(start_date="-1y", end_date="now",
tzinfo=timezone(settings.TIME_ZONE)))"""
PRINT_FOREIGNKEY = """ {} = SubFactory({}Factory){}"""
PRINT_FILEFIELD = """ {} = FileField(filename='{}.{}')"""
PRINT_BOOLEANFIELD = """ {} = Iterator([True, False])"""
PRINT_INTEGERFIELD = """ {} = LazyAttribute(lambda o: randint(1, 100))"""
PRINT_IPADDRESSFIELD = """ {} = LazyAttribute(lambda o: faker.ipv4(network=False))"""
PRINT_TEXTFIELD = """ {} = LazyAttribute(lambda x: faker.paragraph(nb_sentences=3, variable_nb_sentences=True))"""
PRINT_DECIMALFIELD = """ {} = LazyAttribute(lambda x: faker.pydecimal(left_digits={}, right_digits={}, positive=True))"""
PRINT_UNSUPPORTED_FIELD = """ #{} = {} We do not support this field type"""
PRINT_COUNTRYFIELD = """ {} = Iterator(['PA', 'US'])"""
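# The PRINT_* templates above map Django field types to factory_boy attribute
# lines; ModelFactoryGenerator fills them via str.format with each field's metadata.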
# noinspection PyProtectedMember
class ModelFactoryGenerator(object):
def __init__(self, model):
self.model = model
def _generate(self):
factory_class_content = list()
factory_class_content.append({'print': PRINT_FACTORY_CLASS, 'args': [self.model.__name__]})
for field in self.model._meta.fields:
field_type = type(field).__name__
field_data = dict()
if field_type in ['AutoField', 'AutoCreatedField', 'AutoLastModifiedField']:
pass
elif field_type in ['DateTimeField', 'DateField']:
field_data = {'print': PRINT_DATETIMEFIELD, 'args': [field.name]}
factory_class_content.append(field_data)
elif field_type == 'CharField':
field_data = self._get_charfield(field)
factory_class_content.append(field_data)
elif field_type == 'ForeignKey':
related_model = field.remote_field.model.__name__
field_data = {'print': PRINT_FOREIGNKEY,
'args': [field.name, related_model, '']}
factory_class_content.append(field_data)
elif field_type == 'BooleanField':
field_data = {'print': PRINT_BOOLEANFIELD, 'args': [field.name]}
factory_class_content.append(field_data)
elif field_type == 'TextField':
field_data = {'print': PRINT_TEXTFIELD, 'args': [field.name]}
factory_class_content.append(field_data)
elif field_type == 'IntegerField':
field_data = {'print': PRINT_INTEGERFIELD, 'args': [field.name]}
factory_class_content.append(field_data)
elif field_type == 'FileField':
field_data = {'print': PRINT_FILEFIELD, 'args': [field.name, field.name, 'xlsx']}
factory_class_content.append(field_data)
elif field_type == 'DecimalField' or field_type == 'MoneyField':
max_left = field.max_digits - field.decimal_places
max_right = field.decimal_places
field_data = {'print': PRINT_DECIMALFIELD,
'args': [field.name, max_left, max_right]}
factory_class_content.append(field_data)
elif field_type == 'GenericIPAddressField':
field_data = {'print': PRINT_IPADDRESSFIELD, 'args': [field.name]}
factory_class_content.append(field_data)
elif field_type == 'CountryField':
field_data = {'print': PRINT_COUNTRYFIELD, 'args': [field.name]}
factory_class_content.append(field_data)
else:
field_data = {'print': PRINT_UNSUPPORTED_FIELD,
'args': [field.name, field_type]}
factory_class_content.append(field_data)
return factory_class_content
def _get_charfield(self, field):
field_data = dict()
if field.choices is not None and len(field.choices) > 0:
field_data = {'print': PRINT_CHARFIELD_CHOICES, 'args': [field.name, self.model.__name__, 'CHOICES']}
return field_data
else:
if self._is_number(field.name):
field_data = {'print': PRINT_CHARFIELD_NUM,
'args': [field.name, field.max_length]}
return field_data
else:
if field.max_length >= 5:
field_data = {'print': PRINT_CHARFIELD,
'args': [field.name, field.max_length]}
else:
field_data = {'print': PRINT_CHARFIELD_LETTERS,
'args': [field.name, field.max_length]}
return field_data
def _is_number(self, field_name):
num_vals = ['id', 'num']
for nv in num_vals:
if nv in field_name.lower():
return True
return False
def __str__(self):
printable = list()
for print_data in self._generate():
try:
printable.append(print_data['print'].format(*print_data['args']))
except IndexError as e:
print('-' * 74)
print('{print} {args}'.format(**print_data))
raise e
return '\n'.join(printable)
class Command(BaseCommand):
"""
$ python manage.py generate_factories project.app
"""
def add_arguments(self, parser):
parser.add_argument('app_name')
# parser.add_argument("-l", "--list",
# action='store_true',
# dest="list",
# help="List employees",
# )
# parser.add_argument("-a", "--assign",
# action='store_true',
# dest="assign",
# help="Create unit assignments",
# )
#
#
parser.add_argument("--filename",
dest="filename",
help="Output filename",
default=None,
)
# parser.add_argument("--start-date",
# dest="start_date",
# help="Start date for the assignment",
# default=None,
# )
# parser.add_argument("--fiscal-year",
# dest="fiscal_year",
# help="Fiscal year for assignments",
# default=None,
# )
# parser.add_argument("-u", "--username",
# dest="usernames",
# help="LDAP usernames for employees",
# nargs='+',
# )
def handle(self, *args, **options):
app_name = options.get('app_name')
if options.get('filename'):
filename = os.path.join(settings.TEST_OUTPUT_PATH, options.get('filename'))
generator = FactoryBoyGenerator()
factory_data = generator.create_template_data(app_name)
template_name = 'factories.py.j2'
writer = GenericTemplateWriter(template_name)
writer.write(factory_data, filename)
else:
app_manager = DjangoAppManager()
app = app_manager.get_app(app_name)
if not app:
self.stderr.write('This command requires an existing app name as '
'argument')
self.stderr.write('Available apps:')
for app in sorted(app_manager.installed_apps):
self.stderr.write(' %s' % app)
else:
self.stdout.write(PRINT_IMPORTS)
for model in app.get_models():
model_fact = ModelFactoryGenerator(model)
self.stdout.write(str(model_fact))
|
import os
import numpy as np
#---#
# import cv2
# from scipy import ndimage
import imageio
#---#
import matplotlib.pyplot as plt
# data_path = '/opt/carnd_p3/data/' # On GPU-enabled workspace
data_path = '~/opt/carnd_p3/data/' # On local machine
# Expand the path
data_path = os.path.expanduser(data_path)
# The image to check
# sample = "center_2016_12_01_13_42_18_344.jpg"
# sample = "right_2016_12_01_13_42_18_344.jpg"
sample = "left_2016_12_01_13_42_18_344.jpg"
current_path_center = data_path + '/IMG/' + sample.split('/')[-1]
image_ = imageio.imread(current_path_center)
image_crop = image_[50:-20, :]
image_flip = np.fliplr(image_crop)
# plot
#--------------------------#
plt.figure()
plt.imshow(image_)
# crop
plt.figure()
plt.imshow(image_crop)
# crop, flip
plt.figure()
plt.imshow(image_flip)
# Show
plt.show()
|
"""
A command that shows that the bot is working
"""
from time import time
import json
import platform
import psutil
from random import randint
from datetime import datetime
import sys
from vkbottle.bot import Blueprint, Message
from utils.edit_msg import edit_msg
from filters import ForEveryoneRule
bp = Blueprint("Ping-pong command")
quote = ['Я не волшебник, я всего лишь учусь','Это не баг — это незадокументированная фича.','Удаленный код — отлаженный код.','Чтобы понять рекурсию, нужно сперва понять рекурсию.', 'Самая сложная часть в дизайне… держаться подальше от фич.', 'Если сразу не получилось хорошо, назовите это версией 1.0.', 'format c: - лучший антивирус', 'Куплюклавиатурусработающимпробелом', 'Чудеса случаются.', 'Предположим, что у тебя есть 1000 рублей... Ну, для круглого счета возьмем 1024...', '99% ошибок компьютера сидит в полуметре от монитора.', 'Невозможно победить того, кто не сдается', 'Моё "люблю" очень дорогого стоит. Говорю это редко и мало кому.', 'Миллионы людей не заменят тебя. Никогда.', 'Ничего в этой жизни не дается легко']
text = quote[randint(0, (len(quote)-1))]
datanow = datetime.now()
@bp.on.message(ForEveryoneRule("ping"), text="<prefix>пинг")
async def ping_handler(message: Message):
"""
    > !ping
    > 🏓 | Pong!
    > ⏱ | Replied in 0.05 seconds
    (if debug mode is enabled)
    > 🏓 | Pong!
    > ⏱ | Replied in 0.05 seconds (debug)
    > 💻 | OS: Microsoft Windows 11
    > 🔧 | CPU: 21.2%
    > ⚙ | Up for 97 hours (4 days)
    > ❤ | [id322615766|VK+]
"""
start = time()
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["debug"] is not True:
        end = time()
        result = round(end - start, 4)
        await edit_msg(
            bp.api,
            message,
            f"🏓 | Понг!\n⏱ | Ответ за {result} секунд"
        )
else:
try:
cpu = str(psutil.cpu_percent()) + "%"
except PermissionError:
cpu = "не известно (android?)"
system_name = platform.system()
"""
        If the bot is running on Windows 11, platform.release()
        returns 10; to avoid this, check the build number from
        the version string:
"""
if system_name == "Windows":
if int(platform.version().split(".")[2]) > 20000:
system_version = "11"
else:
system_version = platform.release()
else:
system_version = platform.release()
system = system_name + " " + system_version
with open("time_started.txt", "r", encoding="utf-8") as file:
work_hours = round((round(time()) - int(file.read())) / 3600, 4)
work_days = work_hours // 24
end = time()
result = round(end - start, 4)
await edit_msg(
bp.api, message,
f"🏓 | Понг!\n⏱ | Ответ за {result} секунд (debug-Off)\n"
f"🕗 | Время: {datanow} \n"
f"💻 | ОС: {system} \n"
f"🔧 | Процессор: {cpu}\n"
f"⚙ | Бот работает {work_hours} часов, это({work_days} дней)\n"
f"📖 | Цитата запуска: {text}\n \n"
f"❤ | [vk.com/public210991551|Создатель]\n"
f"👤 | [id314119670|Кодер/Тех-Админ]"
)
|
#!/usr/bin/python
print("Hello World")
print("Added line from src2")
print("Added line from src1")
import bar
import baz
|
# -*- coding: utf-8 -*-
# @Author: Muhammad Alfin N
# @Date: 2021-03-30 20:13:14
# @Last Modified by: Muhammad Alfin N
# @Last Modified time: 2021-04-07 18:38:41
import numpy as np
import imageio
import random
import torch
import PIL
import os
import cv2
import time
import pyrebase
from models import get_instrumented_model
from decomposition import get_or_compute
from ipywidgets import fixed,Layout
from skimage import img_as_ubyte
from tqdm import tqdm,trange
from config import Config
from PIL import Image
torch.autograd.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
class Model:
def __init__(self, name, num_components=60):
        if name == 'stroller':
            model_class = 'stroller_193'
        elif name == 'pushchair':
            model_class = 'pushchair_572'
        elif name == 'childseat':
            model_class = 'childseat_719'
        else:
            raise ValueError('Unknown model name: {}'.format(name))
self.name = name
self.config = Config(
model='StyleGAN2',
layer='style',
output_class=model_class,
components=num_components,
use_w=True,
batch_size=5000
)
self.model = self.init_model()
self.storage = self.init_storage()
def init_storage(self):
config = {
"apiKey": "AIzaSyCP9sj_xIogRC_5EowwMwQIh9MEvLlCqrk",
"authDomain": "niomata-745ae.firebaseapp.com",
"projectId": "niomata-745ae",
"storageBucket": "niomata-745ae.appspot.com",
"messagingSenderId": "933534202946",
"appId": "1:933534202946:web:8c1d2b2b94b772533a81db",
"measurementId": "G-MZCLX7LM9G",
"databaseURL":"https://niomata-745ae-default-rtdb.firebaseio.com/"
}
firebase = pyrebase.initialize_app(config)
return firebase.storage()
def init_model(self):
inst = get_instrumented_model(self.config.model,
self.config.output_class,
self.config.layer, torch.device('cuda'),
use_w=self.config.use_w)
model = inst.model
return model
def normalize_z(self,z):
torch.autograd.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
if self.name == 'stroller':
good_seed = [2,3,5,6,7,8,12,13,15,19,20,22,30,39,41,42,43,51,57,63,68,72,91,99,102,144,155,158,167,178,187,239,240,243,297,298,322,323,333,334,335,344,370,373,374,376,384,423,425,436,445,447,472,475,484,499,500,527,576,581,582,595,631,671,689,690,698,708,838,895]
if self.name == 'pushchair':
good_seed = [2,3,4,5,7,8,9,10,11,12,13,20,21,25,31,50,59,62,63,64,107,108,120,129,134,155,191,217,224,229,230,232,242,243,244,247,250,291,294,326,341,366,369,370,373,385,391,392,393,398,417,425,440,451,459,462,472,494,501,515,522,523,534,525,545,553,559]
if self.name == 'childseat':
good_seed = [2,3,27,38,45,48,49,68,78,82,86,90,91,96,110,118,149,154,155,158,159,160,162,167,201,202,206,290,294,295,296,297,302,309,350,351,380,412,437,449,450,451,452,468,500,503,508,519,520,560,561,565,641,643,684,687,715,777,778,813,878,885,889,915,926,927,955,937,972,975,988,993,994]
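        # Blend halfway toward a latent drawn from a curated "good" seed so that
        # generated samples stay near known-good regions of the latent space.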
base = self.model.sample_latent(1, seed=random.choice(good_seed)).cpu().numpy()
z = base + (z - base)*0.5
return z
def generate_from_z(self,z,truncation=0.5,resize=(500,500),normalize=True):
self.model.truncation = truncation
z = self.normalize_z(z) if normalize else z
img = self.model.sample_np(z)
img = Image.fromarray((img * 255).astype(np.uint8)).resize(resize,Image.LANCZOS)
return img
def generate_image(self,seed):
torch.autograd.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
z = self.model.sample_latent(1, seed=seed).cpu().numpy()
return self.generate_from_z(z)
def generate_z(self,seed):
torch.autograd.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
return self.model.sample_latent(1, seed=seed).cpu().numpy()
def generate_transversal(self,output_dir,combinations,num):
torch.autograd.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
seeds = []
for i in num:
z = self.model.sample_latent(1, seed=i).cpu().numpy()
z = self.normalize_z(z)
seeds.append(z)
for combi in combinations:
seed_1 = seeds[combi[0]-1]
seed_2 = seeds[combi[1]-1]
step = 5
imgs = self.transverse_image(z_1=seed_1,
z_2=seed_2,
step=step)
seed_1_path = os.path.join(output_dir,self.name,'{}_{}.npy'.format(self.name,combi[0])).replace(os.sep,'/')
seed_2_path = os.path.join(output_dir,self.name,'{}_{}.npy'.format(self.name,combi[1])).replace(os.sep,'/')
np.save(os.path.splitext(seed_1_path)[0],seed_1)
# self.storage.child(seed_1_path).put(seed_1_path)
np.save(os.path.splitext(seed_2_path)[0],seed_2)
# self.storage.child(seed_2_path).put(seed_2_path)
for step,img in enumerate(imgs):
if step == 0:
name = '{}_{}.jpg'.format(self.name,combi[0],step)
elif step == (len(imgs)-1):
name = '{}_{}.jpg'.format(self.name,combi[1],step)
else:
name = '{}_{}_to_{}_{}.jpg'.format(self.name,combi[0],combi[1],step)
img_path = os.path.join(output_dir,self.name,name).replace(os.sep,'/')
img.save(img_path)
self.storage.child(img_path).put(img_path)
def transverse_image(self,z_1,z_2,step=5):
zs = []
for i in range(step):
z = z_1 + 1/step*i*(z_2-z_1)
zs.append(z)
return [self.generate_from_z(x,normalize=False) for x in zs]
def generate_style(self,z,data,output_dir):
id_style = data['name']
step = int(data['step'])
scale = int(data['scale'])
ls = int(data['layer_start'])
le = int(data['layer_end'])
rule = np.load(data['path'])
truncation = float(data['truncation'])
index = 0
output_dir = os.path.join(output_dir,self.name,id_style)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
for i in range(-step,step):
index = index + 1
z_styles = self.apply_rule(z,rule,i,scale,ls,le)
img = self.generate_from_z(z = z_styles,
truncation=truncation,
normalize = False)
name = '{}_step_{}.jpg'.format(id_style,index)
save_path = os.path.join(output_dir,name).replace(os.sep,'/')
img.save(save_path)
np.save(os.path.splitext(save_path)[0],z_styles)
self.storage.child(save_path).put(save_path)
# self.storage.child(os.path.splitext(save_path)[0]+'.npy').put(os.path.splitext(save_path)[0]+'.npy')
def apply_rule(self,z,rule,step,scale,ls,le):
z = [z]*self.model.get_max_latents()
for l in range(ls, le):
z[l] = z[l] + rule * step * scale
return z |
from kangrouter import KangRouterClient
import time
import os
apiKey = os.environ["API_KEY"]
licenseId = os.environ["LICENSE_ID"]
problem = {
"nbResources": 3,
"jobs": [
{
"jobId": "Job01",
"origLat": "38.674921",
"origLon": "-9.175401",
"destLat": "38.716860",
"destLon": "-9.162417",
"minStartTime": "13:00",
"maxStartTime": "13:15",
"minEndTime": "13:55",
"maxEndTime": "13:55",
"pickupDuration": 10,
"deliveryDuration": 10,
"cargoId": "Fernando Pessoa",
"consumptions": [
0,
1,
0
]
}
],
"vehicles": [
{
"vehicleId": "12-AS-46",
"depotLat": "38.806842",
"depotLon": "-9.382556",
"minStartTime": "07:00",
"maxEndTime": "22:00",
"maxWorkDuration": 540,
"capacities": [
2,
3,
0
],
"""
"breaks": [
{
"breakId": "Lunch",
"minStartTime": "12:00",
"maxEndTime": "14:00",
"duration": 60
}
],
"""
"overspeed": 1.25
}
]
}
def test():
api = KangRouterClient(apiKey,licenseId)
solverId = api.create(problem)
status = api.getStatus(solverId)
for i in range(10):
time.sleep(5)
status = api.getStatus(solverId)
if status["execStatus"]!="pending":
break
assert status["execStatus"]=="completed"
assert status["schedCost"]==70
solution = api.getSolution(solverId)
assert solution["type"]=="total"
assert len(solution["jobsScheduled"])==1
|
# -*- coding: utf-8 -*-
import urlparse
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory
from api.base.settings.defaults import API_BASE
class TestUsers(ApiTestCase):
def setUp(self):
super(TestUsers, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUsers, self).tearDown()
def test_returns_200(self):
res = self.app.get('/{}users/'.format(API_BASE))
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_find_user_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert_in(self.user_two._id, ids)
def test_all_users_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_multiple_in_users(self):
url = "/{}users/?filter[full_name]=fred".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_single_user_in_users(self):
url = "/{}users/?filter[full_name]=my".format(API_BASE)
self.user_one.fullname = 'My Mom'
self.user_one.save()
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_find_no_user_in_users(self):
url = "/{}users/?filter[full_name]=NotMyMom".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_not_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_users_list_takes_profile_image_size_param(self):
size = 42
url = "/{}users/?profile_image_size={}".format(API_BASE, size)
res = self.app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
|
#!/bin/python3
import math
import os
import random
import re
import sys,array
# Complete the countApplesAndOranges function below.
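# An apple thrown a distance d from the tree at a lands on the house when
# s - a <= d <= t - a; an orange from b (with d <= 0) lands when b - t <= |d| <= b - s.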
def countApplesAndOranges(s, t, a, b, apples, oranges):
k1=s-a
k2=b-t
c1=0
c2=0
    m = len(apples)
    n = len(oranges)
for i in range(m):
if(apples[i]>=0):
if(apples[i]>=k1 and apples[i]<=(t-a)):
c1=c1+1
for j in range(n):
if(oranges[j]<=0):
v=abs(oranges[j])
if(v>=k2 and v<=(b-s)):
c2=c2+1
print(c1)
print(c2)
if __name__ == '__main__':
st = input().split()
s = int(st[0])
t = int(st[1])
ab = input().split()
a = int(ab[0])
b = int(ab[1])
mn = input().split()
m = int(mn[0])
n = int(mn[1])
apples = list(map(int, input().rstrip().split()))
oranges = list(map(int, input().rstrip().split()))
countApplesAndOranges(s, t, a, b, apples, oranges)
|
import inspect
import io
import multiprocessing
import os
import socket
import sys
import tempfile
import time
import unittest
import requests
import uvicorn
from dijkstar.graph import Graph
from dijkstar.server import utils
from dijkstar.server.client import Client
from dijkstar.server.conf import settings
class TestUtils(unittest.TestCase):
def test_import_object(self):
obj = utils.import_object("tests.test_server:TestUtils")
self.assertIs(obj, self.__class__)
def test_import_object_none(self):
obj = utils.import_object(None)
self.assertIsNone(obj)
def test_load_graph(self):
with tempfile.NamedTemporaryFile(suffix=".marshal") as file:
graph = Graph()
graph.add_edge(1, 2)
graph.marshal(file)
file.flush()
with utils.modified_settings(graph_file=file.name):
loaded_graph = utils.load_graph(settings)
self.assertEqual(loaded_graph, graph)
def test_load_no_graph(self):
graph = utils.load_graph(settings)
self.assertEqual(graph, Graph())
def test_modified_settings(self):
self.assertIsNone(settings.graph_file)
with utils.modified_settings(graph_file="test_graph_file.marshal"):
self.assertEqual(settings.graph_file, "test_graph_file.marshal")
self.assertIsNone(settings.graph_file)
class TestClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
def get_free_port():
sock = socket.socket()
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
# 1 - - - → 2
# | |
# | |
# ↓ ↓
# 3 - - - → 4
graph_file = tempfile.NamedTemporaryFile(delete=False, suffix=".marshal")
graph = Graph()
graph.add_edge(1, 2, 1)
graph.add_edge(1, 3, 1)
graph.add_edge(2, 4, 1)
graph.add_edge(3, 4, 1)
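        # Diamond graph: every edge costs 1, so the shortest 1 -> 4 path has total cost 2.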
graph.marshal(graph_file)
graph_file.flush()
graph_file.close()
cls.host = "127.0.0.1"
cls.port = get_free_port()
cls.base_url = f"http://{cls.host}:{cls.port}"
cls.graph = graph
cls.graph_file = graph_file.name
cls.server_process = multiprocessing.Process(
target=uvicorn.run,
args=("dijkstar.server.app:app",),
kwargs={
"host": cls.host,
"port": cls.port,
"log_level": "error",
},
)
# XXX: This has to be set for Python 3.8 only (???)
os.environ["GRAPH_FILE"] = graph_file.name
with utils.modified_settings(graph_file=cls.graph_file):
cls.server_process.start()
attempts_left = 20
sleep_time = 0.1
total_seconds = int(round(attempts_left * sleep_time))
while attempts_left > 0:
try:
requests.get(cls.base_url)
except requests.ConnectionError:
attempts_left -= 1
if attempts_left > 0:
time.sleep(sleep_time)
else:
print(
f"WARNING: Failed to connect to server after {total_seconds} seconds",
file=sys.stderr,
)
for name in dir(cls):
if name.startswith("test_"):
attr = getattr(cls, name)
if inspect.isfunction(attr):
decorated = unittest.skip(
"Could not connect to client"
)(attr)
setattr(cls, name, decorated)
else:
break
@classmethod
def tearDownClass(cls):
cls.server_process.terminate()
cls.server_process.join()
os.remove(cls.graph_file)
def setUp(self):
self.client = self.make_client()
def make_client(self, base_url=None):
return Client(base_url or self.base_url)
def test_routes(self):
client = self.client
self.assertEqual(len(client.routes), 6)
self.assertIn("graph-info", client.routes)
self.assertIn("get-node", client.routes)
self.assertIn("get-edge", client.routes)
self.assertIn("find-path", client.routes)
def test_get_graph_info(self):
client = self.client
data = client.graph_info()
self.assertIn("edge_count", data)
self.assertIn("node_count", data)
self.assertEqual(data["edge_count"], 4)
self.assertEqual(data["node_count"], 4)
def test_load_graph(self):
client = self.client
message = client.load_graph()
self.assertEqual(message, f"Graph reloaded from {self.graph_file}")
def test_load_graph_from_file(self):
client = self.client
message = client.load_graph(file_name=self.graph_file)
self.assertEqual(message, f"Graph loaded from {self.graph_file}")
def test_load_graph_from_data(self):
client = self.client
file = io.BytesIO()
self.graph.marshal(file)
file.seek(0)
message = client.load_graph(graph_data=file.getvalue())
self.assertEqual(message, "Graph loaded from data")
def test_reload_graph(self):
client = self.client
message = client.reload_graph()
self.assertEqual(message, f"Graph reloaded from {self.graph_file}")
def test_get_node(self):
client = self.client
data = client.get_node(1)
self.assertEqual(data, self.graph[1])
def test_get_edge(self):
client = self.client
data = client.get_edge(1, 2)
self.assertEqual(data, self.graph.get_edge(1, 2))
def test_find_path(self):
client = self.client
data = client.find_path(1, 4)
self.assertIn("nodes", data)
self.assertIn("edges", data)
self.assertIn("costs", data)
self.assertIn("total_cost", data)
nodes = data["nodes"]
edges = data["edges"]
self.assertEqual(len(nodes), 3)
self.assertEqual(len(edges), 2)
self.assertEqual(nodes, [1, 2, 4])
self.assertEqual(edges, [1, 1])
def test_find_path_with_annex(self):
client = self.client
# Insert node between nodes 1 and 2 then find a path from that
# node (-1) to node 4.
data = client.find_path(
-1,
4,
annex_nodes=(1, 2),
annex_edges=(
(1, -1, 1.1),
(-1, 2, 1.2),
),
)
nodes = data["nodes"]
edges = data["edges"]
self.assertEqual(len(nodes), 3)
self.assertEqual(len(edges), 2)
self.assertEqual(nodes, [-1, 2, 4])
self.assertEqual(edges, [1.2, 1])
|
from .base import BaseEmbedding
from .feature_embedding import FeatureEmbedding
from .tabtransformer_embedding import TabTransformerEmbedding
|
# TODO: gets kicked off by Main every 30 minutes
# imports
from database import session, Strings, strings, String1, string1, String2, string2, String3, string3, Panels, panels
import time
# import quick2wire.i2c as i2c
import serial_interface
import threading
class InputHandler(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
# 3 seconds read timeout for now
self.tty = serial_interface.SerialInterface(3, 0xdeadbeef)
        # Issue a call for faulty data once the counter runs out (every few samples); just for simulation purposes
self.counter = 5
# function to write a database-item into the strings table
def write_string_into_database(self, strnumber, strcurrent):
newcurrent = Strings()
newcurrent.stringnumber = strnumber
newcurrent.stringcurrent = strcurrent
newcurrent.timestamp = time.time()
session.add(newcurrent)
session.flush()
    # function to write a database-item into the panels table
def write_panel_into_database(self, serialnumber, stringnumber, voltage):
newvoltage = Panels()
newvoltage.stringnumber = stringnumber
newvoltage.serialnumber = serialnumber
newvoltage.voltage = voltage
newvoltage.timestamp = time.time()
session.add(newvoltage)
session.flush()
    # TODO: check whether the serial number already exists in the stringsX table; if not, insert it
    # function to write a database item into the string1 table
    def write_string1_into_database(self, serialnumber):
        newstringitem = String1()
        newstringitem.serialnumber = serialnumber
        session.add(newstringitem)
        session.flush()
    # function to write a database item into the string2 table
def write_string2_into_database(self, serialnumber):
newstringitem = String2()
newstringitem.serialnumber = serialnumber
session.add(newstringitem)
session.flush()
    # function to write a database item into the string3 table
def write_string3_into_database(self, serialnumber):
newstringitem = String3()
newstringitem.serialnumber = serialnumber
session.add(newstringitem)
session.flush()
# # function to get the current-values of a string
# def read_stringcurrents_i2c(self, stringnumber):
# address_read = 0b01101001
# address_write = 0b01101000
# register = 0
# if stringnumber == 1:
# register = 0b01100011
# elif stringnumber == 2:
# register = 0b01100101
# elif stringnumber == 3:
# register = 0b01100111
# with i2c.I2CMaster() as bus:
# read_results = bus.transaction(i2c.writing_bytes(address_write, register), i2c.reading(address_read, 2))
# received_data = read_results[0][0]
# # todo check crc
# # todo split the currentvalue from the rest of the data to return it
# return current
#
#
# # function to get the current voltage of one module
# def read_modulevoltage_i2c(self, stringnumber, serialnumber):
# # TODO: register
# register = 0
# address_read = 0
# address_write = 0
# if stringnumber == 1:
# address_read = 0b10011011
# address_write = 0b10011010
# elif stringnumber == 2:
# address_read = 0b10011001
# address_write = 0b10011000
# elif stringnumber == 3:
# address_read = 0b10010011
# address_write = 0b10010010
# with i2c.I2CMaster() as bus:
# read_results = bus.transaction(i2c.writing_bytes(address_write, register, bin(serialnumber)), i2c.reading(address_read, 2))
# received_data = read_results[0][0]
# #todo check CRC
# # todo split infos in SN and voltage
# return (sn,voltage)
# function to get the stringcurrents of all 3 strings and write them into the database
def stringcurrents_request(self):
stringnumbers = [1, 2, 3]
for stringnumber in stringnumbers:
            # NOTE: read_stringcurrents_i2c is currently commented out above, so this call
            # will fail until the i2c helper is restored.
            current = self.read_stringcurrents_i2c(stringnumber)
            # TODO: bring current into the right format
self.write_string_into_database(stringnumber, current)
    # function that compares the string currents and sets a flag if the deviation is too high
    def string_compare(self):
        # creates a list with the latest stringcurrent values for the 3 strings
        # (inserts a zero for any string that does not exist)
stringnumbers = [1, 2, 3]
latestlogs = []
for stringnumber in stringnumbers:
results = session.query(Strings).filter(strings.c.stringnumber == stringnumber).all()
if len(results) == 0:
latestlogs.append(0)
else:
latestlogs.append(results[-1])
# calculates the average of the existing stringcurrents
currents = []
        # TODO: define the allowed deviation
percentage_deviation = 30
for log in latestlogs:
currents.append(log.stringcurrent)
average = (sum(currents))/(sum(1 for e in currents if e)) # ignores the zero if there is one
# sets the watch flag to all logs with a higher percentage_deviation than chosen
for log in latestlogs:
if (log.stringcurrent < average/100*(100-percentage_deviation)) | (log.stringcurrent > average/100*(100+percentage_deviation)):
log.flag_watch = True
session.flush()
else:
pass
# function to get the voltages of all the modules in the system and writes them into the database
def modulevoltage_request(self):
strings = []
strings.append(session.query(String1).all())
strings.append(session.query(String2).all())
strings.append(session.query(String3).all())
# Only for simulation purposes
if self.counter > 0:
            self.counter -= 1
else:
self.counter = 5
for string_id, string in enumerate(strings):
if string:
for item in string:
# TODO: change back to i2c; UART only for testing purposes
# (sn, voltage) = read_modulevoltage_i2c(1, item.serialnumber)
if self.counter > 0:
self.tty.request_good_fake_data(item.serialnumber)
else:
self.tty.request_bad_fake_data(item.serialnumber)
voltage = self.tty.wait_for_voltage(item.serialnumber)
                    # TODO: convert voltage into the right format
                    # TODO: timer to wait for a response (how to implement?)
                    # if voltage == 0:  # TODO: still needs to be adjusted
                    #     # TODO: if there is no response
                    #     historycheck = session.query(Panels).filter((panels.c.serialnumber == item.serialnumber) & (panels.c.timestamp > (time.time() - 24*60*60))).all()
                    #     if len(historycheck)==0:
                    #         # TODO: trigger a notification
                    #         pass  # for testing purposes - remove again
                    #     else:
                    #         pass
                    #     # TODO: else a response arrives
                    # else:
self.write_panel_into_database(item.serialnumber, string_id + 1, voltage)
latestlogs = []
values = []
for panel in string:
result = session.query(Panels).filter(panels.c.serialnumber == panel.serialnumber).order_by(panels.c.timestamp.desc()).first()
latestlogs.append(result)
values.append(result.voltage)
avg_value = sum(values) / len(values)
for log in latestlogs:
deviation = avg_value - log.voltage
log.deviation = deviation
                # TODO: remove this ugly demo patch, as it instantly flags an issue when a module drops significantly (could be a 2 sec shadow)
if(deviation > 0.2 * avg_value):
self.issue_fault(log.serialnumber)
session.flush()
def issue_fault(self, serial_number):
print('FAULT DETECTED', serial_number)
self.fault_callback(serial_number)
def set_fault_callback(self, callback):
self.fault_callback = callback
def stop(self):
self.running = False
def run(self):
self.running = True
while self.running:
self.modulevoltage_request()
#Wait for 3 Secs, then issue new sample
time.sleep(3)
# TODO: main sequence:
# stringcurrents_request()
# string_compare()
# input_handler.write_string1_into_database(0xdeadbeef)
# input_handler.write_string1_into_database(0xcafebabe)
# input_handler.write_string1_into_database(0xbadeaffe)
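
# Hypothetical wiring sketch (not part of the original module; it only uses what is defined
# in this file): the handler runs as a thread and reports faulty modules through a callback
# registered via set_fault_callback().
#
#     handler = InputHandler()
#     handler.set_fault_callback(lambda serial_number: print('notify operator about', serial_number))
#     handler.start()   # polls module voltages every 3 seconds (see run())
#     ...
#     handler.stop()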
|
from .CountingGridModel import CountingGridModel
from .CountingGridModelWithGPU import CountingGridModelWithGPU
__all__ = ['CountingGridModel', 'CountingGridModelWithGPU']
|
import ijson
import pickle
import csv
import os
import sys
import gc
import time
from numpy import random
# Path to the 12gb arxiv dataset
path_dataset = r"..\data\dblp.v12.json"
path_dataset_cities = r"..\data\cities15000.txt"
# Path where we will store the dataset as a python dict
#path_pickled_dataset = r"..\data\dblp.v12.small.pickle"
path_csv_folder = r"..\data\CSV"
path_countries = r"..\data\country.txt"
cities_blacklist = ["university"]
use_entire_dataset = False
acceptance_rate = 0.02
def load_countries():
'''
Updates the city blacklist with all names of countries (and their parts)
    Returns the code -> country and country -> code dictionaries
'''
print("Loading countries dataset and updating blacklist")
countries = []
code_to_country = {}
country_to_code = {}
with open(path_countries, encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
for line in lines:
split_line = line.split(" (")
country = split_line[0]
code = split_line[-1].split(")")[0]
code_to_country[code] = country
country_to_code[country] = code
countries.append(country)
for part_of_country_name in country.split(" "):
if part_of_country_name == "":
continue
cities_blacklist.append(clean_string_cities(part_of_country_name))
return code_to_country, country_to_code
def get_prefixes():
'''
Outputs and returns all prefixes in the data_set (except indexed_abstract)
'''
prefixes = []
with open(path_dataset, "rb") as input_file:
# load json iteratively
parser = ijson.parse(input_file)
for prefix, event, value in parser:
if prefix not in prefixes and "indexed_abstract" not in prefix:
prefixes.append(prefix)
print(prefix)
return prefixes
def main():
'''
Takes the big arxiv dataset and transforms it to a CSV that Neo4J can import as a knowledge graph
'''
code_to_country, country_to_code = load_countries()
dict = compress_to_dict(-1)
dict_all_cities, city_to_country, list_cities = load_cities_data()
# transform_dict_to_csv(dict_all_cities, city_to_country, country_to_code, list_cities, dict)
export_csv_for_pykeen(dict_all_cities, city_to_country, country_to_code, list_cities, dict)
def org_to_unique_city(org, dict_all_cities, city_to_country, country_to_code, max_words_in_city_name = 3):
'''
Given an org name, splits it into all single words and checks if any of them are a city
    Returns the unique name of that city and its country code
'''
# Remove all non-alphabet characters
org_only_alphabet = ""
for i in org:
if i.isalpha():
org_only_alphabet += i
else:
org_only_alphabet += " "
org = org_only_alphabet
words = org.replace(",", " ").replace("\t", " ").split(" ")
words = [word for word in words if word != ""]
possible_names = []
for i in range(1, max_words_in_city_name + 1):
possible_names += string_to_windows(words, i)
found_city_name = False
unique_name = ""
country = ""
for possible_name in possible_names:
if(possible_name in dict_all_cities):
if possible_name in cities_blacklist:
continue
found_city_name = True
unique_name = dict_all_cities[possible_name]
country = city_to_country[unique_name]
if not found_city_name:
for possible_name in words:
if(possible_name in country_to_code):
country = country_to_code[possible_name]
return unique_name, country
def string_to_windows(input_list, windows_size):
    '''
    Slides a window of the given size over the word list and returns the cleaned, concatenated
    contents of each window position, e.g. string_to_windows(['New', 'York', 'City'], 2) -> ['newyork', 'yorkcity']
    '''
output_list = []
for idx, element in enumerate(input_list):
if idx + windows_size > len(input_list):
break
else:
output_list.append(clean_string_cities(' '.join([input_list[i] for i in range(idx, idx + windows_size)])))
return output_list
def clean_string_cities(city):
return city.lower().replace(" ", "")
def ensure_is_int_or_empty(input):
if isinstance(input, int):
return input
elif isfloat(input):
return int(float(input))
elif isinstance(input, str) and input.isnumeric():
return input
return ""
def load_cities_data():
'''
Loads the cities15000 dataset
    Returns: dict_all_cities, city_to_country, list_cities
'''
# Contains all cities from the dataset, key is possible city names, returns unique city names
dict_all_cities = {}
# List of all unique city names
list_cities = []
# Dictionary
# Key: Unique city name
# Value: Country Code
city_to_country = {}
with open(path_dataset_cities, encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
for line in lines:
# First element is an id, get rid of it
row = line.split("\t")
unique_name = clean_string_cities(row[2])
if(unique_name in cities_blacklist):
continue
list_cities.append(unique_name)
names = [row[1], row[2]] + row[3].split(',')
country_idx = -1
for (idx, element) in enumerate(names):
if isfloat(element):
break
if(element not in dict_all_cities):
dict_all_cities[clean_string_cities(element)] = unique_name
city_to_country[unique_name] = row[8]
# Remove potential duplicates in list_cities
list_cities = list(dict.fromkeys(list_cities))
return dict_all_cities, city_to_country, list_cities
def isfloat(value):
'''
Check if a string is a float
Source: https://stackoverflow.com/a/20929881/6515970
'''
try:
float(value)
return True
except ValueError:
return False
def transform_dict_to_csv(dict_all_cities, city_to_country, country_to_code, list_cities, dataset):
'''
Takes the processed dataset and turns it into several CSVs
'''
papers = []
authors = []
keywords = []
paper_keyword_relations = []
paper_author_relations = []
authors_last_id = 0
authors_to_id = {}
keywords_last_id = 0
keywords_to_id = {}
authors_cities = []
print("Transform dictionary")
for (id,paper) in enumerate(dataset):
if(id % 5000 == 0):
print(f"\r>> Transformed {id/1e6} million entries ", end = '', flush=True)
papers.append([id, clean_string(paper["title"]), ensure_is_int_or_empty(paper["year"])])
for author in paper["authors"]:
org = clean_string(author["org"])
city, country = org_to_unique_city(org, dict_all_cities, city_to_country, country_to_code)
name = clean_string(author["name"])
identifier = name + " " + city
if identifier in authors_to_id:
author_id = authors_to_id[identifier]
else:
author_id = authors_last_id
authors.append([author_id, name, org, city, country])
authors_to_id[identifier] = author_id
authors_last_id += 1
if city != "":
authors_cities.append([author_id, city])
paper_author_relations.append([id, author_id])
for keyword in paper["keywords"]:
name = clean_string(keyword["name"])
weight = keyword["weight"]
if name in keywords_to_id:
keyword_id = keywords_to_id[name]
else:
keyword_id = keywords_last_id
keywords.append([keyword_id, name])
keywords_to_id[name] = keyword_id
keywords_last_id += 1
paper_keyword_relations.append([id, keyword_id, weight])
print("\nStoring CSVs:")
export_to_csv(papers, "papers")
export_to_csv(authors, "authors")
export_to_csv(keywords, "keywords")
export_to_csv(paper_author_relations, "paper_author")
export_to_csv(paper_keyword_relations, "paper_keyword")
export_to_csv(authors_cities, "authors_cities")
cities = list(map(lambda x: [x], list_cities))
countries = list(map(lambda x: [x], list(country_to_code.values())))
export_to_csv(cities, "cities")
export_to_csv(countries, "countries")
cities_countries = []
for city in list_cities:
cities_countries.append([city, city_to_country[city]])
export_to_csv(cities_countries, "cities_countries")
def export_csv_for_pykeen(dict_all_cities, city_to_country, country_to_code, list_cities, dataset):
'''
    Takes the processed dataset and turns it into one CSV for pykeen
'''
relations = []
cities = []
print("Transform dictionary")
for (id,paper) in enumerate(dataset):
if(id % 5000 == 0):
print(f"\r>> Transformed {id/1e6} million entries ", end = '', flush=True)
paper_name = ("(P){0} ({1})".format(clean_string(paper["title"]), ensure_is_int_or_empty(paper["year"])))
for author in paper["authors"]:
org = clean_string(author["org"])
city, _ = org_to_unique_city(org, dict_all_cities, city_to_country, country_to_code)
author_name = "(A){0}: {1} ({2})".format(clean_string(author["name"]),clean_string(author["org"]), city)
# Only store author information if we know their city
# Keep track of all cities so we can create the corresponding countries
if city != '':
city_name = "(CI)" + city
relations.append([paper_name, "AUTHOREDBY", author_name])
relations.append([paper_name, "WRITTENIN", city_name])
relations.append([author_name, "WORKSIN", city_name])
cities.append(city)
for keyword in paper["keywords"]:
keyword_name = clean_string(keyword["name"])
relations.append([paper_name, "ISABOUT", "(K)" + keyword_name])
# Remove duplicates from cities list
cities = list(dict.fromkeys(cities))
for city in cities:
relations.append(["(CI)" + city, "ISIN", "(CO)" + city_to_country[city]])
print("\n")
export_to_csv(relations, "relations_for_pykeen_final", '\t')
# These symbols might confuse the import into Neo4j,
# so we remove them
forbidden_symbols = [',', '"', "'", '`', '’', '´', "{", "}", '"', '“', '”', '\\', '$', '^', '\n']
def clean_string(string):
#for symbol in forbidden_symbols:
# string = string.replace(symbol, '')
#return string
return ''.join(list([i for i in string if i.isalpha() or i.isnumeric() or i == " " or i == ":" or i == "(" or i == ")"]))
def export_to_csv(data, name, delimiter = ","):
'''
Takes a list of lines and stores that as a csv in the csv folder under <name>.csv
'''
filename = os.path.join(path_csv_folder, name + ".csv")
print(f"Writing to {filename}")
with open(filename, mode='w', encoding='utf-8', errors='ignore') as file:
csv_writer = csv.writer(file, delimiter=delimiter, quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in data:
#print(row[1].encode('utf-8', 'ignore'))
csv_writer.writerow(row)
# must_accept forces a paper to be kept whenever one of the names below appears in it; it is reset once per paper (see compress_to_dict).
must_accept_authors = []
must_accept_keywords = ["Physics", "Knowledge graph", "Discrete mathematics", "Ontology", "Description logic", "Knowledge management"]
def compress_to_dict(max_lines_to_process = -1):
'''
Takes the big arxiv dataset and removes unneeded data and stores it as a dict
'''
data_set = []
processed_lines = 0
with open(path_dataset, "rb") as input_file:
parser = ijson.parse(input_file)
for prefix, event, value in parser:
if(processed_lines % 1e5 == 0):
print(f"\r>> Read {processed_lines/1e6} million lines", end = '', flush=True)
if(processed_lines == max_lines_to_process):
break
            if "indexed_abstract" not in prefix:
                # Open or close a new paper
                if prefix == "item" and event == "start_map":
                    # Reset the must-accept flag once per paper rather than on every parser
                    # event, so that a matching keyword/author anywhere in the paper counts.
                    must_accept = False
                    curr_paper = {"title":"", "authors":[], "keywords":[], "arxiv_id":0, "year":0,
                                  "venue":{"arxiv_id":0, "name":""}, "publisher":""}
elif prefix == "item" and event == "end_map":
if must_accept or use_entire_dataset or random.rand() <= acceptance_rate:
data_set.append(curr_paper)
# Keywords
elif prefix == "item.fos.item" and event == "start_map":
curr_keyword = {"name":"", "weight":0}
elif prefix == "item.fos.item" and event == "end_map":
curr_paper["keywords"].append(curr_keyword)
elif prefix == "item.fos.item.name":
if value in must_accept_keywords:
must_accept = True
curr_keyword["name"] = value
elif prefix == "item.fos.item.w":
curr_keyword["weight"] = float(value)
# Authors
elif prefix == "item.authors.item" and event == "start_map":
curr_author = {"name":"", "org":"", "arxiv_id":0}
elif prefix == "item.authors.item" and event == "end_map":
curr_paper["authors"].append(curr_author)
elif prefix == "item.authors.item.name":
if value in must_accept_authors:
must_accept = True
curr_author["name"] = value
elif prefix == "item.authors.item.org":
curr_author["org"] = value
elif prefix == "item.authors.item.id":
curr_author["arxiv_id"] = value
# Everything else
elif prefix == "item.publisher":
curr_paper["publisher"] = value
elif prefix == "item.title":
curr_paper["title"] = clean_string(value)
elif prefix == "item.id":
curr_paper["arxiv_id"] = value
elif prefix == "item.year":
curr_paper["year"] = value
elif prefix == "item.year":
curr_paper["year"] = value
elif prefix == "item.publisher":
curr_paper["publisher"] = value
elif prefix == "item.venue.raw":
curr_paper["venue"]["name"] = value
elif prefix == "item.venue.id":
curr_paper["venue"]["arxiv_id"] = value
processed_lines += 1
#print("\nStoring dictionary (pickle)")
# Store dataset as dict
#outputfile = open(path_pickled_dataset,'wb')
#pickle.dump(data_set,outputfile)
#outputfile.close()
return data_set
if __name__ == "__main__":
main()
def print_dataset():
'''
Prints the JSON (except indexed_abstract)
'''
with open(path_dataset, 'rb') as input_file:
parser = ijson.parse(input_file)
for prefix, event, value in parser:
if "indexed_abstract" not in prefix:
print('prefix={}, event={}, value={}'.format(prefix, event, value))
'''
Prefixes except indexed_abstract:
item
item.id
item.authors
item.authors.item
item.authors.item.name
item.authors.item.org
item.authors.item.id
item.title
item.year
item.n_citation
item.page_start
item.page_end
item.doc_type
item.publisher
item.volume
item.issue
item.doi
item.references
item.references.item
item.fos
item.fos.item
Contains a topic of the paper, e.g. "Computer Science" or "Communications protocol"
item.fos.item.name
item.fos.item.w
item.venue
item.venue.raw
item.venue.id
item.venue.type
'''
|
from torch.utils.tensorboard import SummaryWriter
class Logger:
def __init__(self, log_dir, logging_interval):
self.writer = SummaryWriter(log_dir)
self.logging_interval = logging_interval
self.counter = 0
@classmethod
def from_config(cls, config, name):
log_dir = config.logging_path.format(
ds_name=config.ds_name,
model_architecture=config.model_architecture,
name=name
)
return cls(log_dir=log_dir, logging_interval=config.logging_interval)
class BiGANLogger(Logger):
def __call__(self, epoch, step, disc_loss, gen_enc_loss, *args, **kwargs):
self.counter += 1
if step % self.logging_interval == 0:
self.writer.add_scalar(f'Loss/Disc', disc_loss, self.counter)
self.writer.add_scalar(f'Loss/GenEnc', gen_enc_loss, self.counter)
print(f"epoch {epoch}, disc_loss {disc_loss}, gen_enc_loss {gen_enc_loss}")
class GANLogger(Logger):
def __call__(self, epoch, step, disc_loss, gen_loss, gen_disc_acc,
disc_real_acc, disc_fake_acc, *args, **kwargs):
self.counter += 1
if step % self.logging_interval == 0:
self.writer.add_scalar(f'Loss/Disc', disc_loss, self.counter)
self.writer.add_scalar(f'Loss/Gen', gen_loss, self.counter)
self.writer.add_scalar(f'Gen/DiscFake', gen_disc_acc, self.counter)
self.writer.add_scalar(f'Disc/Real', disc_real_acc, self.counter)
self.writer.add_scalar(f'Disc/Fake', disc_fake_acc, self.counter)
print(f"epoch {epoch}, disc_loss {disc_loss}, gen_loss {gen_loss}")
print(f"gen_disc_acc {gen_disc_acc}, disc_real_acc {disc_real_acc}, disc_fake_acc {disc_fake_acc}")
class ClfLogger(Logger):
def __call__(self, epoch, loss, step, *args, **kwargs):
self.counter += 1
if step % self.logging_interval == 0:
self.writer.add_scalar('Loss', loss, self.counter)
print(f"epoch {epoch}, loss {loss}")
|
"""
https://pythontutor.com/visualize.html#code=class%20Solution%3A%0A%20%20%20%20def%20duplicateZeros%28self,%20arr%29%20-%3E%20None%3A%0A%20%20%20%20%20%20%20%20%22%22%22%0A%20%20%20%20%20%20%20%20Do%20not%20return%20anything,%20modify%20arr%20in-place%20instead.%0A%20%20%20%20%20%20%20%20%22%22%22%0A%20%20%20%20%20%20%20%20possible_dups%20%3D%200%0A%20%20%20%20%20%20%20%20length_%20%3D%20len%28arr%29%20-%201%0A%0A%20%20%20%20%20%20%20%20%23%20Find%20the%20number%20of%20zeros%20to%20be%20duplicated%0A%20%20%20%20%20%20%20%20for%20left%20in%20range%28length_%20%2B%201%29%3A%0A%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Stop%20when%20left%20points%20beyond%20the%20last%20element%20in%20the%20original%20list%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20which%20would%20be%20part%20of%20the%20modified%20list%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20left%20%3E%20length_%20-%20possible_dups%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20break%0A%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Count%20the%20zeros%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20arr%5Bleft%5D%20%3D%3D%200%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%23%20Edge%20case%3A%20This%20zero%20can't%20be%20duplicated.%20We%20have%20no%20more%20space,%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%23%20as%20left%20is%20pointing%20to%20the%20last%20element%20which%20could%20be%20included%20%20%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20if%20left%20%3D%3D%20length_%20-%20possible_dups%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Blength_%5D%20%3D%200%20%23%20For%20this%20zero%20we%20just%20copy%20it%20without%20duplication.%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20length_%20-%3D%201%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20possible_dups%20%2B%3D%201%0A%0A%20%20%20%20%20%20%20%20%23%20Start%20backwards%20from%20the%20last%20element%20which%20would%20be%20part%20of%20new%20list.%0A%20%20%20%20%20%20%20%20last%20%3D%20length_%20-%20possible_dups%0A%0A%20%20%20%20%20%20%20%20%23%20Copy%20zero%20twice,%20and%20non%20zero%20once.%0A%20%20%20%20%20%20%20%20for%20i%20in%20range%28last,%20-1,%20-1%29%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20arr%5Bi%5D%20%3D%3D%200%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Bi%20%2B%20possible_dups%5D%20%3D%200%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20possible_dups%20-%3D%201%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Bi%20%2B%20possible_dups%5D%20%3D%200%0A%20%20%20%20%20%20%20%20%20%20%20%20else%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Bi%20%2B%20possible_dups%5D%20%3D%20arr%5Bi%5D%0A%0A%0As%20%3D%20Solution%28%29%0As.duplicateZeros%28%5B1,0,2,3,0,4,5,0%5D%29%0A%20%20%20%20%20%20%20%20%20%20%20%20&cumulative=false&curInstr=8&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false
Why backwards?
If we were to start shifting from left to right, we would overwrite elements before we had the chance to shift them;
that is why we go backwards instead.
We make sure we have shifted out an element before we shift another one into its original position.
What is the correct shift distance?
The duplication of a zero pushes all elements to the right of it by one.
This means also that every element is shifted to the right as many times as there are zeroes to the left of it.
E.g. in the array [1,0,2,0,3] , 1 will not move, 2 will shift one position and 3 will shift two positions.
As we go backwards, every time we bypass a zero (and duplicate it), the shift distance decreases for the elements we haven't shifted yet, because there is one less zero in front of them.
Why the < n checks?
Shifts push some of the elements out of the array. We do the < n checks to make sure we only write elements that are shifted to a valid position inside the array, and we ignore the ones falling off the end.
"""
from typing import List


class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
"""
zeros = 0
for i in range(len(arr)):
if arr[i] == 0:
zeros += 1
for i in range(len(arr) - 1, -1 ,-1):
if i + zeros < len(arr):
                arr[i + zeros] = arr[i]
if arr[i] == 0:
zeros -= 1
if i + zeros < len(arr):
arr[i + zeros] = 0
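
if __name__ == "__main__":
    # Quick sanity check using the example from the explanation above: after the in-place
    # shift, [1, 0, 2, 0, 3] becomes [1, 0, 0, 2, 0].
    demo = [1, 0, 2, 0, 3]
    Solution().duplicateZeros(demo)
    print(demo)  # -> [1, 0, 0, 2, 0]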
|
"""
Unit tests for data_collections module
"""
import unittest
import pytest
from sentinelhub import DataCollection, TestSentinelHub, SHConfig
from sentinelhub.constants import ServiceUrl
from sentinelhub.data_collections import DataCollectionDefinition
from sentinelhub.exceptions import SHDeprecationWarning
class TestDataCollectionDefinition(TestSentinelHub):
def test_repr(self):
definition = DataCollection.SENTINEL1_IW.value
representation = repr(definition)
self.assertTrue(isinstance(representation, str))
self.assertTrue(representation.count('\n') >= 5)
def test_derive(self):
definition = DataCollectionDefinition(
api_id='X',
wfs_id='Y'
)
derived_definition = definition.derive(wfs_id='Z')
self.assertEqual(derived_definition.api_id, 'X')
self.assertEqual(derived_definition.wfs_id, 'Z')
self.assertEqual(derived_definition.collection_type, None)
def test_compare(self):
def1 = DataCollectionDefinition(api_id='X', _name='A')
def2 = DataCollectionDefinition(api_id='X', _name='B')
self.assertEqual(def1, def2)
class TestDataCollection(TestSentinelHub):
def test_define(self):
for _ in range(3):
data_collection = DataCollection.define(
'NEW',
api_id='X',
sensor_type='Sensor',
bands=('B01',),
is_timeless=True
)
self.assertEqual(data_collection, DataCollection.NEW)
with self.assertRaises(ValueError):
DataCollection.define(
'NEW_NEW',
api_id='X',
sensor_type='Sensor',
bands=('B01',),
is_timeless=True
)
with self.assertRaises(ValueError):
DataCollection.define(
'NEW',
api_id='Y'
)
def test_define_from(self):
bands = ['B01', 'XYZ']
for _ in range(3):
data_collection = DataCollection.define_from(
DataCollection.SENTINEL5P,
'NEW_5P',
api_id='X',
bands=bands
)
self.assertEqual(data_collection, DataCollection.NEW_5P)
self.assertEqual(data_collection.api_id, 'X')
self.assertEqual(data_collection.wfs_id, DataCollection.SENTINEL5P.wfs_id)
self.assertEqual(data_collection.bands, tuple(bands))
def test_define_byoc_and_batch(self):
byoc_id = '0000d273-7e89-4f00-971e-9024f89a0000'
        byoc = DataCollection.define_byoc(byoc_id, name='MY_BYOC')
batch = DataCollection.define_batch(byoc_id, name='MY_BATCH')
self.assertEqual(byoc, DataCollection.MY_BYOC)
self.assertEqual(batch, DataCollection.MY_BATCH)
for ds in [byoc, batch]:
self.assertTrue(ds.api_id.endswith(byoc_id))
self.assertEqual(ds.collection_id, byoc_id)
with self.assertWarns(SHDeprecationWarning):
byoc2 = DataCollection(byoc_id.replace('0', '1'))
self.assertTrue(byoc, byoc2)
def test_attributes(self):
ds = DataCollection.SENTINEL3_OLCI
for attr_name in ['api_id', 'catalog_id', 'wfs_id', 'service_url', 'bands', 'sensor_type']:
value = getattr(ds, attr_name)
self.assertNotEqual(value, None)
self.assertEqual(value, getattr(ds.value, attr_name))
ds = DataCollection.define('EMPTY')
for attr_name in ['api_id', 'catalog_id', 'wfs_id', 'bands']:
with self.assertRaises(ValueError):
getattr(ds, attr_name)
self.assertEqual(ds.service_url, None)
    def test_sentinel1_checks(self):
self.assertTrue(DataCollection.SENTINEL1_IW.is_sentinel1)
self.assertFalse(DataCollection.SENTINEL2_L1C.is_sentinel1)
self.assertTrue(DataCollection.SENTINEL1_IW_ASC.contains_orbit_direction('ascending'))
self.assertFalse(DataCollection.SENTINEL1_IW_DES.contains_orbit_direction('ascending'))
self.assertTrue(DataCollection.SENTINEL2_L2A.contains_orbit_direction('descending'))
def test_get_available_collections(self):
collections = DataCollection.get_available_collections()
self._check_collection_list(collections)
config = SHConfig()
config.sh_base_url = ServiceUrl.EOCLOUD
eocloud_collections = DataCollection.get_available_collections(config=config)
self._check_collection_list(eocloud_collections)
self.assertNotEqual(eocloud_collections, collections)
def _check_collection_list(self, collection_list):
self.assertTrue(isinstance(collection_list, list))
self.assertTrue(all(isinstance(data_collection, DataCollection) for data_collection in collection_list))
def test_data_collection_transfer_with_ray():
""" This tests makes sure that the process of transferring a custom DataCollection object to a Ray worker and back
works correctly.
"""
ray = pytest.importorskip('ray')
ray.init(log_to_driver=False)
collection = DataCollection.SENTINEL2_L1C.define_from('MY_NEW_COLLECTION', api_id='xxx')
collection_future = ray.remote(lambda x: x).remote(collection)
transferred_collection = ray.get(collection_future)
assert collection is transferred_collection
ray.shutdown()
if __name__ == '__main__':
unittest.main()
|
# Read and write data
import pickle
import os
from plugins import getNow
from plugins import logManage
key_allow: list = [
'#', '*',
',', ',',
'.', '。',
'!', '!',
'?', '?',
':', ':',
';', ';',
'+',
'-',
'/'
]
def save_obj(obj, name: str) -> None:
filePath = name + '.data'
with open(filePath, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name: str) -> dict:
filePath = name + '.data'
if not os.path.exists(filePath):
return {}
with open(filePath, 'rb') as f:
return pickle.load(f)
# Read the configuration
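# The config file is a plain "key=value" text file: '#' starts a comment, list values are
# comma separated, and blacklist entries use the form "number(reason)". Illustrative lines
# (hypothetical values, matching the parser below):
#   administrator=123456,234567
#   blacklistGroup=111111(spam),222222(ads)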
def read_config():
filePath = 'data/config.txt'
config = {
'version': '0.5.5',
'qq': 0,
'name': '小柒',
'color': '天蓝色',
'age': 18,
'master': 0,
'master_right': [],
'administrator': [],
'contributor': [],
'RPG_administrator': [],
'blacklist_group': {},
'blacklist_member': {},
'test_group': []
}
if not os.path.exists(filePath):
with open(filePath, 'w', encoding='utf-8') as f:
f.write('version=0.5.5\n')
f.write('qq=0\n')
f.write('name=小柒\n')
f.write('color=天蓝色\n')
f.write('age=18\n')
f.write('master=1597867839\n')
f.write('masterRight=\n')
f.write('administrator=\n')
f.write('contributor=\n')
f.write('RPGadministrator=\n')
f.write('blacklistGroup=\n')
f.write('blacklistMember=\n')
f.write('testGroup=\n')
return config
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
datas = line.split('#')
if len(datas) < 1:
continue
pair = datas[0].split('=')
if len(pair) != 2:
continue
pair[0] = pair[0].strip()
pair[1] = pair[1].strip()
if pair[0] == 'version':
config['version'] = pair[1]
elif pair[0] == 'qq':
if pair[1].isdigit():
config['qq'] = int(pair[1])
elif pair[0] == 'name':
config['name'] = pair[1]
elif pair[0] == 'color':
config['color'] = pair[1]
elif pair[0] == 'age':
if pair[1].isdigit():
config['age'] = int(pair[1])
elif pair[0] == 'master':
if pair[1].isdigit():
config['master'] = int(pair[1])
elif pair[0] == 'masterRight':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
config['master_right'].append(int(i))
elif pair[0] == 'administrator':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
config['administrator'].append(int(i))
elif pair[0] == 'contributor':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
config['contributor'].append(int(i))
elif pair[0] == 'RPGadministrator':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
config['RPG_administrator'].append(int(i))
elif pair[0] == 'testGroup':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
config['test_group'].append(int(i))
elif pair[0] == 'blacklistGroup':
qq_list = pair[1].split(',')
for i in qq_list:
data_list = i.split('(')
if data_list[0].isdigit():
config['blacklist_group'][int(data_list[0])] = data_list[1][:-1]
elif pair[0] == 'blacklistMember':
qq_list = pair[1].split(',')
for i in qq_list:
data_list = i.split('(')
if data_list[0].isdigit():
config['blacklist_member'][int(data_list[0])] = data_list[1][:-1]
return config
# Save the configuration
def save_config(config):
filePath = 'data/config.txt'
with open(filePath, 'w', encoding='utf-8') as f:
f.write('version=' + config['version'] + '\n')
f.write('qq=' + str(config['qq']) + '\n')
f.write('name=' + config['name'] + '\n')
f.write('color=' + config['color'] + '\n')
f.write('age=' + str(config['age']) + '\n')
f.write('master=' + str(config['master']) + '\n')
f.write('masterRight=' + list_string(config['master_right']) + '\n')
f.write('administrator=' + list_string(config['administrator']) + '\n')
f.write('contributor=' + list_string(config['contributor']) + '\n')
f.write('RPGadministrator=' + list_string(config['RPG_administrator']) + '\n')
f.write('blacklistGroup=' + dict_string(config['blacklist_group']) + '\n')
f.write('blacklistMember=' + dict_string(config['blacklist_member']) + '\n')
f.write('testGroup=' + list_string(config['test_group']) + '\n')
# Read a group's data
def read_group(group_id):
global key_allow
filePath = 'data/__GROUP__/' + str(group_id)
    group = {
        'config': {
            'mute': False, # whether the group is muted
            'limit': False, # whether rate limiting is enabled
            'nudge': True, # whether the nudge feature is enabled
            'RPG': True, # whether the RPG is enabled
            'limit_RPG': False, # whether RPG rate limiting is enabled
            'curse': True, # whether swear-word replies are enabled
            'image': False, # whether image search is enabled
            'ai': False, # whether the AI chat is enabled
            'autonomous_reply': True, # whether automatic replies (custom ones defined in the group) are enabled
            'repeat': True, # whether automatic "+1" repeating is enabled
            'TRPG': True, # whether the TRPG dice bot is enabled
            'clash': False, # whether Clash of Clans lookups are enabled
            'clash_tag': '', # clan tag
            'key': ['.', '。', '*'], # trigger characters
            'reply_limit': 0, # reply limit count
            'welcome': False, # whether welcome messages are enabled
            'right_train': [], # who may train 小柒
            'right_activity': [], # who may start activities
            'right_mute': [], # who may mute members
            'right_RPG': [], # who may toggle games
            # =============================
            'flash': False, # reveal flash images
            'member_wather': False, # group member monitoring
            'revoke': False, # prevent message recall
            'automatic': False, # automatic join-request review
            'pass': '' # group join passphrase
        },
        'key_reply': {
            'key_at': {},
            'key': {},
            'question': {},
            'question_at': {}
        }, # keyword replies
        'welcome': None, # welcome message
        'prohibited_word': [],
        'statistics': {
            'reply': [],
            'RPG': [],
            'card': []
        },
        'group': {}, # grouping info
        'date': getNow.toString()
    }
if not os.path.exists(filePath + '.config'):
with open(filePath + '.config', 'w', encoding='utf-8') as f:
f.write('date=' + group['date'] + '\n')
f.write('mute=' + str(group['config']['mute']) + '\n')
f.write('limit=' + str(group['config']['limit']) + '\n')
f.write('nudge=' + str(group['config']['nudge']) + '\n')
f.write('TRPG=' + str(group['config']['TRPG']) + '\n')
f.write('RPG=' + str(group['config']['RPG']) + '\n')
f.write('RPGlimit=' + str(group['config']['limit_RPG']) + '\n')
f.write('curse=' + str(group['config']['curse']) + '\n')
f.write('image=' + str(group['config']['image']) + '\n')
f.write('ai=' + str(group['config']['ai']) + '\n')
f.write('autoReply=' + str(group['config']['autonomous_reply']) + '\n')
f.write('repeat=' + str(group['config']['repeat']) + '\n')
f.write('clash=' + str(group['config']['clash']) + '\n')
f.write('clashTag=' + str(group['config']['clash_tag']) + '\n')
f.write('welcome=' + str(group['config']['welcome']) + '\n')
f.write('key')
for i in group['config']['key']:
f.write('=' + i)
f.write('\n')
f.write('replyTimes=' + str(group['config']['reply_limit']) + '\n')
f.write('trainRight=' + list_string(group['config']['right_train']) + '\n')
f.write('activityRight=' + list_string(group['config']['right_activity']) + '\n')
f.write('muteRight=' + list_string(group['config']['right_mute']) + '\n')
f.write('gameRight=' + list_string(group['config']['right_RPG']) + '\n')
f.write('flash=' + str(group['config']['flash']) + '\n')
f.write('memberWather=' + str(group['config']['member_wather']) + '\n')
f.write('revoke=' + str(group['config']['revoke']) + '\n')
f.write('automatic=' + str(group['config']['automatic']) + '\n')
f.write('pass=' + str(group['config']['pass']) + '\n')
data = {
'key_reply': group['key_reply'],
'welcome': group['welcome'],
'prohibited_word': group['prohibited_word'],
'statistics': group['statistics'],
'group': group['group']
}
if not os.path.exists(filePath + '.data'):
save_obj(group, filePath)
else:
group = load_obj(filePath)
return group
if not os.path.exists(filePath + '.data'):
data = {
'key_reply': group['key_reply'],
'welcome': group['welcome'],
'prohibited_word': group['prohibited_word'],
'statistics': group['statistics'],
'group': group['group']
}
save_obj(group, filePath)
data = load_obj(filePath)
group['key_reply'] = data['key_reply']
group['welcome'] = data['welcome']
group['prohibited_word'] = data['prohibited_word']
group['statistics'] = data['statistics']
group['group'] = data['group']
with open(filePath + '.config', 'r', encoding='utf-8') as f:
lines = f.readlines()
group['config']['key'] = []
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
datas = line.split('#')
if len(datas) < 1:
continue
pair = datas[0].split('=')
if len(pair) < 2:
continue
pair[0] = pair[0].strip()
pair[1] = pair[1].strip().lower()
if pair[0] == 'date':
group['date'] = pair[1]
elif pair[0] == 'mute':
if pair[1] == 'true':
group['config']['mute'] = True
else:
group['config']['mute'] = False
elif pair[0] == 'limit':
if pair[1] == 'true':
group['config']['limit'] = True
else:
group['config']['limit'] = False
elif pair[0] == 'nudge':
if pair[1] == 'true':
group['config']['nudge'] = True
else:
group['config']['nudge'] = False
elif pair[0] == 'TRPG':
if pair[1] == 'true':
group['config']['TRPG'] = True
else:
group['config']['TRPG'] = False
elif pair[0] == 'RPG':
if pair[1] == 'true':
group['config']['RPG'] = True
else:
group['config']['RPG'] = False
elif pair[0] == 'RPGlimit':
if pair[1] == 'true':
group['config']['limit_RPG'] = True
else:
group['config']['limit_RPG'] = False
elif pair[0] == 'curse':
if pair[1] == 'true':
group['config']['curse'] = True
else:
group['config']['curse'] = False
elif pair[0] == 'image':
if pair[1] == 'true':
group['config']['image'] = True
else:
group['config']['image'] = False
elif pair[0] == 'ai':
if pair[1] == 'true':
group['config']['ai'] = True
else:
group['config']['ai'] = False
elif pair[0] == 'autoReply':
if pair[1] == 'true':
group['config']['autonomous_reply'] = True
else:
group['config']['autonomous_reply'] = False
elif pair[0] == 'repeat':
if pair[1] == 'true':
group['config']['repeat'] = True
else:
group['config']['repeat'] = False
elif pair[0] == 'clash':
if pair[1] == 'true':
group['config']['clash'] = True
else:
group['config']['clash'] = False
elif pair[0] == 'clashTag':
group['config']['clash_tag'] = pair[1].upper()
elif pair[0] == 'welcome':
if pair[1] == 'true':
group['config']['welcome'] = True
else:
group['config']['welcome'] = False
elif pair[0] == 'replyTimes':
if pair[1].isdigit():
group['config']['reply_limit'] = int(pair[1])
elif pair[0] == 'key':
for i in pair:
if i in key_allow and i not in group['config']['key']:
group['config']['key'].append(i)
elif pair[0] == 'trainRight':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
group['config']['right_train'].append(int(i))
elif pair[0] == 'activityRight':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
group['config']['right_activity'].append(int(i))
elif pair[0] == 'muteRight':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
group['config']['right_mute'].append(int(i))
elif pair[0] == 'gameRight':
qq_list = pair[1].split(',')
for i in qq_list:
if i.isdigit():
group['config']['right_RPG'].append(int(i))
elif pair[0] == 'flash':
if pair[1] == 'true':
group['config']['flash'] = True
else:
group['config']['flash'] = False
elif pair[0] == 'memberWather':
if pair[1] == 'true':
group['config']['member_wather'] = True
else:
group['config']['member_wather'] = False
elif pair[0] == 'revoke':
if pair[1] == 'true':
group['config']['revoke'] = True
else:
group['config']['revoke'] = False
elif pair[0] == 'automatic':
if pair[1] == 'true':
group['config']['automatic'] = True
else:
group['config']['automatic'] = False
elif pair[0] == 'pass':
pair = datas[0].split('=')
pair[1] = pair[1].strip()
group['config']['pass'] = pair[1]
return group
# Save a group's data
def save_group(group_id, config):
filePath = 'data/__GROUP__/' + str(group_id)
data = {
'key_reply': config['key_reply'],
'welcome': config['welcome'],
'prohibited_word': config['prohibited_word'],
'statistics': config['statistics'],
'group': config['group']
}
save_obj(data, filePath)
with open(filePath + '.config', 'w', encoding='utf-8') as f:
f.write('date=' + config['date'] + '\n')
f.write('mute=' + str(config['config']['mute']) + '\n')
f.write('limit=' + str(config['config']['limit']) + '\n')
f.write('nudge=' + str(config['config']['nudge']) + '\n')
f.write('TRPG=' + str(config['config']['TRPG']) + '\n')
f.write('RPG=' + str(config['config']['RPG']) + '\n')
f.write('RPGlimit=' + str(config['config']['limit_RPG']) + '\n')
f.write('curse=' + str(config['config']['curse']) + '\n')
f.write('image=' + str(config['config']['image']) + '\n')
f.write('ai=' + str(config['config']['ai']) + '\n')
f.write('autoReply=' + str(config['config']['autonomous_reply']) + '\n')
f.write('repeat=' + str(config['config']['repeat']) + '\n')
f.write('clash=' + str(config['config']['clash']) + '\n')
f.write('clashTag=' + str(config['config']['clash_tag']) + '\n')
f.write('welcome=' + str(config['config']['welcome']) + '\n')
f.write('key')
for i in config['config']['key']:
f.write('=' + i)
f.write('\n')
f.write('replyTimes=' + str(config['config']['reply_limit']) + '\n')
f.write('trainRight=' + list_string(config['config']['right_train']) + '\n')
f.write('activityRight=' + list_string(config['config']['right_activity']) + '\n')
f.write('muteRight=' + list_string(config['config']['right_mute']) + '\n')
f.write('gameRight=' + list_string(config['config']['right_RPG']) + '\n')
f.write('flash=' + str(config['config']['flash']) + '\n')
f.write('memberWather=' + str(config['config']['member_wather']) + '\n')
f.write('revoke=' + str(config['config']['revoke']) + '\n')
f.write('automatic=' + str(config['config']['automatic']) + '\n')
f.write('pass=' + str(config['config']['pass']) + '\n')
# Read a user's data
def read_user(qq):
global key_allow
filePath = 'data/__USER__/' + str(qq)
user = {
'config': {
'ai': True,
'reputation': 5,
            'clash_user_tag': [], # player tags
            'main_clash_user_tag': 0, # default player tag
            'clash_tag': [], # clan tags
            'main_clash_tag': 0, # default clan tag
'key': []
},
'buffer': {
'id': 0,
'buffer': None,
'time': 'xx-xx-xx'
},
'statistics': {
'reply': [],
'RPG': [],
'card': []
},
'date': getNow.toString()
}
if not os.path.exists(filePath + '.config'):
with open(filePath + '.config', 'w', encoding='utf-8') as f:
f.write('ai=false\n')
f.write('reputation=5\n')
f.write('clashUserTag=\n')
f.write('mainClashUserTag=0\n')
f.write('clashTag=\n')
f.write('mainClashTag=0\n')
f.write('key=*=.=。\n')
f.write('date=' + user['date'] + '\n')
if not os.path.exists(filePath + '.data'):
config = {
'buffer': user['buffer'],
'statistics': user['statistics']
}
save_obj(config, filePath)
return user
if not os.path.exists(filePath + '.data'):
config = {
'buffer': user['buffer'],
'statistics': user['statistics']
}
save_obj(config, filePath)
config = load_obj(filePath)
user['buffer'] = config['buffer']
user['statistics'] = config['statistics']
with open(filePath + '.config', 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
datas = line.split('#')
if len(datas) < 1:
continue
pair = datas[0].split('=')
if len(pair) < 2:
continue
pair[0] = pair[0].strip()
pair[1] = pair[1].strip().lower()
if pair[0] == 'date':
user['date'] = pair[1]
elif pair[0] == 'clashUserTag':
for i in range(1, len(pair)):
if len(pair[i]) > 0:
user['config']['clash_user_tag'].append(pair[i].upper())
elif pair[0] == 'mainClashUserTag' and pair[1].isdigit():
user['config']['main_clash_user_tag'] = int(pair[1])
elif pair[0] == 'clashTag':
for i in range(1, len(pair)):
if len(pair[i]) > 0:
user['config']['clash_tag'].append(pair[i].upper())
elif pair[0] == 'mainClashTag' and pair[1].isdigit():
user['config']['main_clash_tag'] = int(pair[1])
elif pair[0] == 'ai':
if pair[1] == 'true':
user['config']['ai'] = True
else:
user['config']['ai'] = False
elif pair[0] == 'key':
for i in pair:
if i in key_allow and i not in user['config']['key']:
user['config']['key'].append(i)
return user
# Save a user's data
def save_user(qq, user):
filePath = 'data/__USER__/' + str(qq)
config = {
'buffer': user['buffer'],
'statistics': user['statistics']
}
save_obj(config, filePath)
with open(filePath + '.config', 'w', encoding='utf-8') as f:
f.write('ai=' + str(user['config']['ai']) + '\n')
f.write('clashUserTag')
for i in range(0, len(user['config']['clash_user_tag'])):
f.write('=' + user['config']['clash_user_tag'][i])
f.write('\n')
f.write('mainClashUserTag=' + str(user['config']['main_clash_user_tag']) + '\n')
f.write('clashTag')
for i in range(0, len(user['config']['clash_tag'])):
f.write('=' + user['config']['clash_tag'][i])
f.write('\n')
f.write('mainClashTag=' + str(user['config']['main_clash_tag']) + '\n')
f.write('key')
for i in user['config']['key']:
f.write('=' + i)
f.write('\n')
f.write('date=' + user['date'] + '\n')
def read_statistics():
filePath = 'data/__LOG__/statistics.log'
statistics = {
        'kick': 0, # times the bot was kicked out
        'quit': 0, # times the bot left a group
        'mute': 0, # times members were muted
        'unmute': 0, # times members were unmuted
        'awaken': 0, # wake-up count
        'help': 0, # help invocation count
        'base_function': 0, # basic feature invocation count
        'talk': 0, # talk module invocation count
        'clock_activity': 0, # check-in / activity module invocation count
        'image_search': 0, # image search module invocation count
        'command': 0, # command module invocation count
        'operate': 0, # administrator operation invocation count
        'game': 0, # RPG game invocation count
        'key_reply': 0, # group custom keyword invocation count
        'auto_repeat': 0, # automatic "+1" invocation count
        'auto_reply': 0, # automatic reply invocation count
        'clash': 0, # Clash of Clans invocation count
        'new_friend': 0, # new friends
        'new_group': 0, # newly joined groups
        'message': 0, # messages sent
        'last_minute': 0, # replies during the last minute
        'nudge': 0 # nudges
}
if not os.path.exists(filePath):
with open (filePath, 'w', encoding='utf-8') as f:
f.write('kick=0\n')
f.write('quit=0\n')
f.write('mute=0\n')
f.write('unmute=0\n')
f.write('awaken=0\n')
f.write('help=0\n')
f.write('base_function=0\n')
f.write('talk=0\n')
f.write('clock_activity=0\n')
f.write('image_search=0\n')
f.write('command=0\n')
f.write('operate=0\n')
f.write('game=0\n')
f.write('auto_repeat=0\n')
f.write('auto_reply=0\n')
f.write('clash=0\n')
f.write('new_friend=0\n')
f.write('new_group=0\n')
f.write('message=0\n')
f.write('last_minute=0\n')
f.write('nudge=0\n')
return statistics
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
datas = line.split('#')
if len(datas) < 1:
continue
pair = datas[0].split('=')
if len(pair) < 2:
continue
pair[0] = pair[0].strip()
pair[1] = pair[1].strip()
if statistics.__contains__(pair[0]) and pair[1].isdigit():
statistics[pair[0]] = int(pair[1])
return statistics
def save_statistics(statistics):
filePath = 'data/__LOG__/statistics.log'
with open(filePath, 'w', encoding='utf-8') as f:
f.write('kick=' + str(statistics['kick']) + '\n')
f.write('quit=' + str(statistics['quit']) + '\n')
f.write('mute=' + str(statistics['mute']) + '\n')
f.write('unmute=' + str(statistics['unmute']) + '\n')
f.write('awaken=' + str(statistics['awaken']) + '\n')
f.write('help=' + str(statistics['help']) + '\n')
f.write('base_function=' + str(statistics['base_function']) + '\n')
f.write('talk=' + str(statistics['talk']) + '\n')
f.write('clock_activity=' + str(statistics['clock_activity']) + '\n')
f.write('image_search=' + str(statistics['image_search']) + '\n')
f.write('command=' + str(statistics['command']) + '\n')
f.write('operate=' + str(statistics['operate']) + '\n')
f.write('game=' + str(statistics['game']) + '\n')
f.write('auto_repeat=' + str(statistics['auto_repeat']) + '\n')
f.write('auto_reply=' + str(statistics['auto_reply']) + '\n')
f.write('clash=' + str(statistics['clash']) + '\n')
f.write('new_friend=' + str(statistics['new_friend']) + '\n')
f.write('new_group=' + str(statistics['new_group']) + '\n')
f.write('message=' + str(statistics['message']) + '\n')
f.write('last_minute=' + str(statistics['last_minute']) + '\n')
f.write('nudge=' + str(statistics['nudge']) + '\n')
def list_string(data):
init = False
ans = ''
for i in data:
if init:
ans += ','
else:
init = True
ans += str(i)
return ans
def dict_string(data):
init = False
ans = ''
for key, value in data.items():
if init:
ans += ','
else:
init = True
ans += str(key) + '(' + value + ')'
return ans
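# Examples of the serialized forms produced above (for reference):
#   list_string([1, 2, 3])     -> '1,2,3'
#   dict_string({123: 'spam'}) -> '123(spam)'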
def read_weather():
filePath = 'data/Function/Weather/weather.txt'
weather = {}
if not os.path.exists(filePath):
logManage.log(getNow.toString(), '天气文件缺失!')
return weather
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
datas = line.split('#')
if len(datas) < 1:
continue
pair = datas[0].split('=')
if len(pair) != 2:
continue
pair[0] = pair[0].strip()
pair[1] = pair[1].strip()
weather[pair[0]] = pair[1]
return weather
def read_clock():
filePath = 'data/ClockActivity/clockIn'
clock = {}
if not os.path.exists(filePath + '.data'):
save_obj(clock, filePath)
else:
clock = load_obj(filePath)
return clock
def save_clock(clock):
filePath = 'data/ClockActivity/clockIn'
save_obj(clock, filePath)
def read_luck():
filePath = 'data/luck'
luck = {
'luck': {},
'luckDate': 'xx-xx-xx'
}
if not os.path.exists(filePath + '.data'):
save_obj(luck, filePath)
else:
luck = load_obj(filePath)
return luck
def save_luck(luck):
filePath = 'data/luck'
save_obj(luck, filePath)
def read_screen_word():
filePath = 'data/screenWords.txt'
screen = []
if not os.path.exists(filePath):
with open(filePath , 'w', encoding='utf-8') as f:
f.write('\n')
logManage.log(getNow.toString(), '屏蔽词文件缺失')
return screen
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if len(line) > 0:
screen.append(line)
return screen
def save_screen_word(screen):
filePath = 'data/screenWords.txt'
with open(filePath , 'w', encoding='utf-8') as f:
for i in screen:
f.write(i + '\n')
if __name__ == '__main__':
print('读写数据')
|
#!/usr/bin/env python
import argparse
import gzip
import os
from collections import OrderedDict
import yaml
from Bio.SeqIO.QualityIO import FastqGeneralIterator
OUTPUT_DBKEY_DIR = 'output_dbkey'
OUTPUT_METRICS_DIR = 'output_metrics'
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_dbkey(dnaprints_dict, key, s):
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']}
# {'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
d = dnaprints_dict.get(key, {})
for data_table_value, v_list in d.items():
if s in v_list:
return data_table_value
return ""
def get_dnaprints_dict(dnaprint_fields):
    # A dnaprint_fields entry looks something like this.
# [['AF2122', '/galaxy/tool-data/vsnp/AF2122/dnaprints/NC_002945v4.yml']]
dnaprints_dict = {}
for item in dnaprint_fields:
# Here item is a 2-element list of data
        # table components, value and path.
value = item[0]
path = item[1].strip()
with open(path, "rt") as fh:
# The format of all dnaprints yaml
# files is something like this:
# brucella:
# - 0111111111111111
print_dict = yaml.load(fh, Loader=yaml.Loader)
for print_dict_k, print_dict_v in print_dict.items():
dnaprints_v_dict = dnaprints_dict.get(print_dict_k, {})
if len(dnaprints_v_dict) > 0:
                    # dnaprints_dict already contains k (e.g., 'brucella'),
                    # and dnaprints_v_dict will be a dictionary that
                    # looks something like this:
# {'NC_002945v4': ['11001110', '11011110', '11001100']}
value_list = dnaprints_v_dict.get(value, [])
value_list = value_list + print_dict_v
dnaprints_v_dict[value] = value_list
else:
# dnaprints_v_dict is an empty dictionary.
dnaprints_v_dict[value] = print_dict_v
dnaprints_dict[print_dict_k] = dnaprints_v_dict
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']}
# {'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
return dnaprints_dict
def get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum):
if brucella_sum > 3:
group = "Brucella"
dbkey = get_dbkey(dnaprints_dict, "brucella", brucella_string)
elif bovis_sum > 3:
group = "TB"
dbkey = get_dbkey(dnaprints_dict, "bovis", bovis_string)
elif para_sum >= 1:
group = "paraTB"
dbkey = get_dbkey(dnaprints_dict, "para", para_string)
else:
group = ""
dbkey = ""
return group, dbkey
def get_oligo_dict():
oligo_dict = {}
oligo_dict["01_ab1"] = "AATTGTCGGATAGCCTGGCGATAACGACGC"
oligo_dict["02_ab3"] = "CACACGCGGGCCGGAACTGCCGCAAATGAC"
oligo_dict["03_ab5"] = "GCTGAAGCGGCAGACCGGCAGAACGAATAT"
oligo_dict["04_mel"] = "TGTCGCGCGTCAAGCGGCGTGAAATCTCTG"
oligo_dict["05_suis1"] = "TGCGTTGCCGTGAAGCTTAATTCGGCTGAT"
oligo_dict["06_suis2"] = "GGCAATCATGCGCAGGGCTTTGCATTCGTC"
oligo_dict["07_suis3"] = "CAAGGCAGATGCACATAATCCGGCGACCCG"
oligo_dict["08_ceti1"] = "GTGAATATAGGGTGAATTGATCTTCAGCCG"
oligo_dict["09_ceti2"] = "TTACAAGCAGGCCTATGAGCGCGGCGTGAA"
oligo_dict["10_canis4"] = "CTGCTACATAAAGCACCCGGCGACCGAGTT"
oligo_dict["11_canis"] = "ATCGTTTTGCGGCATATCGCTGACCACAGC"
oligo_dict["12_ovis"] = "CACTCAATCTTCTCTACGGGCGTGGTATCC"
oligo_dict["13_ether2"] = "CGAAATCGTGGTGAAGGACGGGACCGAACC"
oligo_dict["14_63B1"] = "CCTGTTTAAAAGAATCGTCGGAACCGCTCT"
oligo_dict["15_16M0"] = "TCCCGCCGCCATGCCGCCGAAAGTCGCCGT"
oligo_dict["16_mel1b"] = "TCTGTCCAAACCCCGTGACCGAACAATAGA"
oligo_dict["17_tb157"] = "CTCTTCGTATACCGTTCCGTCGTCACCATGGTCCT"
oligo_dict["18_tb7"] = "TCACGCAGCCAACGATATTCGTGTACCGCGACGGT"
oligo_dict["19_tbbov"] = "CTGGGCGACCCGGCCGACCTGCACACCGCGCATCA"
oligo_dict["20_tb5"] = "CCGTGGTGGCGTATCGGGCCCCTGGATCGCGCCCT"
oligo_dict["21_tb2"] = "ATGTCTGCGTAAAGAAGTTCCATGTCCGGGAAGTA"
oligo_dict["22_tb3"] = "GAAGACCTTGATGCCGATCTGGGTGTCGATCTTGA"
oligo_dict["23_tb4"] = "CGGTGTTGAAGGGTCCCCCGTTCCAGAAGCCGGTG"
oligo_dict["24_tb6"] = "ACGGTGATTCGGGTGGTCGACACCGATGGTTCAGA"
oligo_dict["25_para"] = "CCTTTCTTGAAGGGTGTTCG"
oligo_dict["26_para_sheep"] = "CGTGGTGGCGACGGCGGCGGGCCTGTCTAT"
oligo_dict["27_para_cattle"] = "TCTCCTCGGTCGGTGATTCGGGGGCGCGGT"
return oligo_dict
def get_seq_counts(value, fastq_list, gzipped):
count = 0
for fastq_file in fastq_list:
if gzipped:
with gzip.open(fastq_file, 'rt') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
else:
with open(fastq_file, 'r') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
return(value, count)
def get_species_counts(fastq_list, gzipped):
count_summary = {}
oligo_dict = get_oligo_dict()
for v1 in oligo_dict.values():
returned_value, count = get_seq_counts(v1, fastq_list, gzipped)
for key, v2 in oligo_dict.items():
if returned_value == v2:
count_summary.update({key: count})
count_list = []
for v in count_summary.values():
count_list.append(v)
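    # count_list preserves the oligo_dict insertion order: entries 1-16 are
    # the Brucella oligos, 17-24 the TB oligos and 25-27 the paraTB oligos,
    # which is what the slices below rely on.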
brucella_sum = sum(count_list[:16])
bovis_sum = sum(count_list[16:24])
para_sum = sum(count_list[24:])
return count_summary, count_list, brucella_sum, bovis_sum, para_sum
def get_species_strings(count_summary):
binary_dictionary = {}
for k, v in count_summary.items():
if v > 1:
binary_dictionary.update({k: 1})
else:
binary_dictionary.update({k: 0})
binary_dictionary = OrderedDict(sorted(binary_dictionary.items()))
binary_list = []
for v in binary_dictionary.values():
binary_list.append(v)
brucella_binary = binary_list[:16]
brucella_string = ''.join(str(e) for e in brucella_binary)
bovis_binary = binary_list[16:24]
bovis_string = ''.join(str(e) for e in bovis_binary)
para_binary = binary_list[24:]
para_string = ''.join(str(e) for e in para_binary)
return brucella_string, bovis_string, para_string
def output_dbkey(file_name, dbkey, output_file):
# Output the dbkey.
with open(output_file, "w") as fh:
fh.write("%s" % dbkey)
def output_files(fastq_file, count_list, group, dbkey, dbkey_file, metrics_file):
base_file_name = get_sample_name(fastq_file)
output_dbkey(base_file_name, dbkey, dbkey_file)
output_metrics(base_file_name, count_list, group, dbkey, metrics_file)
def output_metrics(file_name, count_list, group, dbkey, output_file):
# Output the metrics.
with open(output_file, "w") as fh:
fh.write("Sample: %s\n" % file_name)
fh.write("Brucella counts: ")
for i in count_list[:16]:
fh.write("%d," % i)
fh.write("\nTB counts: ")
for i in count_list[16:24]:
fh.write("%d," % i)
fh.write("\nPara counts: ")
for i in count_list[24:]:
fh.write("%d," % i)
fh.write("\nGroup: %s" % group)
fh.write("\ndbkey: %s\n" % dbkey)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dnaprint_fields', action='append', dest='dnaprint_fields', nargs=2, required=False, default=None, help="List of dnaprints data table value, name and path fields")
parser.add_argument('--read1', action='store', dest='read1', required=True, default=None, help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', default=False, help='Input files are gzipped')
parser.add_argument('--output_dbkey', action='store', dest='output_dbkey', required=True, default=None, help='Output reference file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', required=True, default=None, help='Output metrics file')
args = parser.parse_args()
fastq_list = [args.read1]
if args.read2 is not None:
fastq_list.append(args.read2)
    # The value of dnaprint_fields is a list of 2-element lists, where each
    # list is the [value, path] components of the vsnp_dnaprints data table.
# The data_manager_vsnp_dnaprints tool assigns the dbkey column from the
# all_fasta data table to the value column in the vsnp_dnaprints data
# table to ensure a proper mapping for discovering the dbkey.
dnaprints_dict = get_dnaprints_dict(args.dnaprint_fields)
# Here fastq_list consists of either a single read
# or a set of paired reads, producing single outputs.
count_summary, count_list, brucella_sum, bovis_sum, para_sum = get_species_counts(fastq_list, args.gzipped)
brucella_string, bovis_string, para_string = get_species_strings(count_summary)
group, dbkey = get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum)
output_files(args.read1, count_list, group, dbkey, dbkey_file=args.output_dbkey, metrics_file=args.output_metrics)
|
import os
import torch
from .base_synthesizer import BaseSynthesizer
class SwapSynthesizer(BaseSynthesizer):
@staticmethod
def add_commandline_args(parser):
return parser
def prepare_synthesis(self):
print("Preparing swapping visualization ...")
if self.folders is None:
print("'folder' is None.")
return self._is_available
self.contents = self.get_dataset(self.folders[0], force_square=False)
self.styles = self.get_dataset(self.folders[1], force_square=True)
result_dir = os.path.join(self.run_dir, "swap")
os.makedirs(result_dir, exist_ok=True)
self.filename = os.path.join(result_dir, "grid.png")
print("Done!")
self._is_available = True
return self._is_available
@torch.no_grad()
def synthesize(self, model, *args, **kwargs):
print("Synthesizing images using style code swapping ...")
device = model.device
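        # Grid layout (saved as one image): the first row is a blank cell
        # followed by the style images; each later row is a content image
        # followed by that content re-synthesized with every style code.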
content_images = []
for content_image in self.contents:
content_images.append(content_image.unsqueeze(0).to(device))
grid = [torch.ones_like(content_images[0])]
style_images = []
for style_image in self.styles:
style_images.append(style_image)
style_images = torch.stack(style_images).to(device)
grid.append(style_images)
for content_image in content_images:
inputs = content_image.repeat(style_images.size(0), 1, 1, 1)
outputs = model.synthesize(inputs, style_images)
grid.append(content_image)
grid.append(outputs)
grid = torch.cat(grid)
        nrow = style_images.size(0) + 1
self.save_image(grid, self.filename, nrow=nrow)
|
"""Taskrunners"""
from .base import Taskrunner
from .coroutine import CoroutineTaskrunner
from .decorators import as_task
from .multi_dispatch import TaskDispatcher
from .multiprocess import MultiprocessTaskrunner
from .store_writer import StoreTaskWriter
from .store_reader import StoreTaskReader
from .thread import ThreadTaskrunner
from .utils import run_process
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import re
class LogcatSymbolicator(object):
CLASS_REGEX = re.compile(r"\b[A-Za-z][0-9A-Za-z_$]*\.[0-9A-Za-z_$.]+\b")
TRACE_REGEX = re.compile(
r"^(?P<prefix>.*)\s+at (?P<class>[A-Za-z][0-9A-Za-z_$]*\.[0-9A-Za-z_$.]+)"
r"\.(?P<method>[0-9A-Za-z_$<>]+)\(((Unknown Source)?:(?P<lineno>\d+))?\)\s*\n",
re.MULTILINE,
)
def __init__(self, symbol_maps):
self.symbol_maps = symbol_maps
self.pending_switches = []
def class_replacer(self, matchobj):
m = matchobj.group(0)
if m in self.symbol_maps.class_map:
return self.symbol_maps.class_map[m].origin_class
return m
def find_case_positions(self, start, pattern_id):
count_positions = self.symbol_maps.line_map.get_stack(start)
assert len(count_positions) == 1
assert count_positions[0].method == "redex.$Position.count"
# The cases are stored in the immediately following lines,
# and are ordered by pattern-id, so we can do a binary search.
end = start + count_positions[0].line
start = start + 1
while start <= end:
middle = (start + end) // 2
case_positions = self.symbol_maps.line_map.get_stack(middle)
assert len(case_positions) >= 1
assert case_positions[0].method == "redex.$Position.case"
if case_positions[0].line == pattern_id:
case_positions.pop(0)
return case_positions
elif case_positions[0].line < pattern_id:
start = middle + 1
else:
end = middle - 1
return None
# If there's no debug info item, stack traces have no line number e.g.
# at X.OPu.A04()
# Just deobfuscate the class/method name
def line_replacer_no_lineno(self, matchobj):
class_name = matchobj.group("class")
method_name = matchobj.group("method")
if class_name in self.symbol_maps.class_map:
class_map = self.symbol_maps.class_map[class_name]
deobf_class_name = class_map.origin_class
deobf_method_name = class_map.method_mapping[method_name]
return "%s\tat %s.%s()\n" % (
matchobj.group("prefix"),
deobf_class_name,
deobf_method_name,
)
return matchobj.string
def line_replacer(self, matchobj):
if not matchobj.group("lineno"):
return self.line_replacer_no_lineno(matchobj)
lineno = int(matchobj.group("lineno"))
cls = matchobj.group("class")
logging.debug("Starting with %s:%d", cls, lineno)
if self.symbol_maps.iodi_metadata is not None:
mapped_lineno, _ = self.symbol_maps.iodi_metadata.map_iodi(
self.symbol_maps.debug_line_map,
cls,
matchobj.group("method"),
lineno,
)
lineno = mapped_lineno if mapped_lineno else lineno
logging.debug("IODI mapped_lineno=%s lineno=%d", mapped_lineno, lineno)
positions = self.symbol_maps.line_map.get_stack(lineno - 1)
if cls in self.symbol_maps.class_map:
cls = self.symbol_maps.class_map[cls]
logging.debug("Class-map: cls=%s", cls)
result = ""
while positions:
pos = positions.pop(0)
if pos.method == "redex.$Position.switch":
self.pending_switches.append(
{"prefix": matchobj.group("prefix"), "line": pos.line}
)
logging.debug(
"Switch position: %s %d", matchobj.group("prefix"), pos.line
)
elif pos.method == "redex.$Position.pattern":
logging.debug("Switch pattern: %d", pos.line)
pattern_id = pos.line
if self.pending_switches:
ps = self.pending_switches.pop()
case_positions = self.find_case_positions(ps["line"], pattern_id)
if case_positions:
case_positions.extend(positions)
positions = case_positions
continue
result += "%s\t$(unresolved switch %d)\n" % (
matchobj.group("prefix"),
pattern_id,
)
elif pos.method is None:
logging.debug("Position without method")
result += "%s\tat %s.%s(%s:%d)\n" % (
matchobj.group("prefix"),
cls,
matchobj.group("method"),
pos.file,
pos.line,
)
else:
logging.debug(
"Position with method: %s/%s:%d", pos.method, pos.file, pos.line
)
result += "%s\tat %s(%s:%d)\n" % (
matchobj.group("prefix"),
pos.method,
pos.file,
pos.line,
)
return result
def symbolicate(self, line):
line = self.CLASS_REGEX.sub(self.class_replacer, line)
line = self.TRACE_REGEX.sub(self.line_replacer, line)
return line
@staticmethod
def is_likely_logcat(line):
return line.startswith("--------- beginning of") or re.match(
r"[A-Z]/[A-Za-z0-9_$](\s*\d+):", line
)
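# Illustrative driver loop (the symbol_maps object and the input file are
# hypothetical, not part of this module):
#
#   symbolicator = LogcatSymbolicator(symbol_maps)
#   with open("logcat.txt") as fh:
#       for line in fh:
#           if LogcatSymbolicator.is_likely_logcat(line):
#               line = symbolicator.symbolicate(line)
#           sys.stdout.write(line)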
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
print 'os.name =',os.name
print 'os.uname=',os.uname()
print 'os.environ=',os.environ
print 'os.getenv(path)=',os.getenv('path')
# Part of the file/directory functions live in os, the rest in os.path
# Get the absolute path of the current directory
print 'absolute path of the current directory =', os.path.abspath('.')
# Create a new directory inside some directory
# First build the full path of the directory to be created:
os.path.join('/Users/mac','test')
# Then create the directory
os.mkdir('/Users/mac/test')
# Remove a directory
os.rmdir('/Users/mac/test')
# Note: when combining two paths, do not concatenate strings directly; use os.path.join, which works on every operating system
# Likewise, splitting a path should use the dedicated functions
os.path.split('/Users/mac/file.txt')
# Result: ('/Users/mac', 'file.txt')
# Get the file extension
os.path.splitext('/Users/mac/file.txt')
# Result: ('/Users/mac/file', '.txt')
# Rename a file
os.rename('test.txt','test.py')
# Delete a file
os.remove('test.py') |
from manimlib_cairo.imports import *
class Logo(VGroup):
CONFIG = {
'color_1': [WHITE, BLUE_B, BLUE_D],
'color_2': [WHITE, '#C59978', '#8D5630'],
# 'color_3': [average_color("#CCCCCC", BLUE_C), BLUE_C, BLUE_D],
# 'color_4': [average_color("#CCCCCC", "#C59978"), '#C59978', '#8D5630'],
'color_3': [average_color(WHITE, BLUE_C), BLUE_C, BLUE_D],
'color_4': [average_color(WHITE, "#C59978"), '#C59978', '#8D5630'],
'center': ORIGIN,
'size': 2,
'shift_out': ORIGIN,
'black_bg': True,
'add_bg_square': False,
}
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.create_logo()
def create_logo(self):
p1 = Polygon(ORIGIN, RIGHT, 2 * UP, stroke_width=0).set_fill(self.color_1[0], 1)
p2 = Polygon(1.5 * RIGHT, 3 * UR, 3 * UP, stroke_width=0).set_fill(self.color_1[1], 1)
p3 = Polygon(2 * RIGHT, 3 * RIGHT, 3 * RIGHT + 2 * UP, stroke_width=0).set_fill(self.color_1[2], 1)
if not self.black_bg:
p1.set_fill(self.color_3[0], 1), p2.set_fill(self.color_3[1], 1), p3.set_fill(self.color_3[2], 1)
self.bg = Square(stroke_width=0, fill_color=BLACK if self.black_bg else WHITE, fill_opacity=1).set_height(self.size * 2.5)
if self.add_bg_square:
self.add(self.bg)
self.part_ur = VGroup(p1, p2, p3).move_to([2.5, 1., 0] + self.shift_out)
self.part_ul = self.part_ur.copy().rotate(PI / 2, about_point=ORIGIN)
self.part_dl = self.part_ur.copy().rotate(PI, about_point=ORIGIN)
self.part_dr = self.part_ur.copy().rotate(3 * PI / 2, about_point=ORIGIN)
self.add(self.part_ur, self.part_ul, self.part_dl, self.part_dr)
self.set_height(self.size).move_to(self.center)
if self.black_bg:
self.part_ur[0].set_fill(self.color_2[0], 1), self.part_ur[1].set_fill(self.color_2[1], 1), self.part_ur[2].set_fill(self.color_2[2], 1)
else:
self.part_ur[0].set_fill(self.color_4[0], 1), self.part_ur[1].set_fill(self.color_4[1], 1), self.part_ur[2].set_fill(self.color_4[2], 1)
self.inner_triangles = VGroup(self.part_ur[0], self.part_ul[0], self.part_dl[0], self.part_dr[0])
self.mid_triangles = VGroup(self.part_ur[1], self.part_ul[1], self.part_dl[1], self.part_dr[1])
self.outer_triangles = VGroup(self.part_ur[2], self.part_ul[2], self.part_dl[2], self.part_dr[2])
class OpeningScene(Scene):
CONFIG = {
"camera_config": {
"background_color": "#333333",
},
"enable_caching": False,
}
def construct(self):
logo = Logo(size=8/3)
squares = VGroup(*[Polygon(ORIGIN, UR, UL), Polygon(ORIGIN, UL, DL), Polygon(ORIGIN, DL, DR), Polygon(ORIGIN, DR, UR),])
squares.set_fill(WHITE, 1).set_stroke(width=0.5, color=WHITE).rotate(np.arctan(0.5)).set_height(logo.inner_triangles.get_height())
for s in squares:
s.scale(0.8)
img = ImageMobject("Tony.png").set_height(2)
Group(logo, img).arrange(RIGHT, buff=1.5).center()
line = Line(UP, DOWN, stroke_width=8, color=WHITE).move_to(mid(logo.get_right(), img.get_left()))
line.set_length(1.4)
text = VGroup(
Text("Manim-Kindergarten", font="Orbitron bold", color=GREY_B),
Text("鹤翔万里", font="PangMenZhengDao", color=WHITE, size=2.2)
).arrange(DOWN, aligned_edge=LEFT, buff=0.1).next_to(img, buff=0.5)
text[0][0].set_color(logo.color_2[2])
text[0][6].set_color(logo.color_1[2])
all_logo = Group(logo, text, line, img).center()
text = Group(text, line, img)
bg = Rectangle(height=10, width=10, fill_color="#333333", fill_opacity=1, stroke_width=0)
bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT).shift(RIGHT*0.2))
text.save_state()
text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)
logo.save_state()
logo.move_to(ORIGIN)
logo.scale(1.5)
tris = logo.inner_triangles.copy().rotate(-PI)
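        # Flash the inner triangles on and off with shrinking wait times to
        # create an accelerating flicker before the main transforms below.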
self.add(text, bg)
self.wait(0.3)
self.add(tris)
self.wait(0.3)
self.remove(tris)
self.wait(0.2)
self.add(tris)
self.wait(0.15)
self.remove(tris)
self.wait(0.1)
self.add(tris)
self.wait(0.1)
self.remove(tris)
self.wait(0.075)
self.add(tris)
self.wait(0.075)
self.remove(tris)
self.wait(0.05)
self.add(tris)
self.wait(0.05)
self.remove(tris)
# square = Square().set_height(tris.get_height()).set_stroke(width=0.5, color=WHITE)
# self.play(ReplacementTransform(square, tris), run_time=1)
self.wait(0.2)
self.play(ShowSubmobjectsOneByOne(tris), rate_func=linear, run_time=0.4)
for i in tris:
self.add(i)
self.wait(0.1)
self.play(*[ReplacementTransform(tris[i], squares[i]) for i in range(4)],
rate_func=rush_from, run_time=0.6)
#self.play(ReplacementTransform(tris, squares), rate_func=linear, run_time=0.8)
self.wait(0.1)
self.play(*[ReplacementTransform(squares[i], logo[i]) for i in range(4)],
rate_func=rush_from, run_time=0.6)
#self.play(ReplacementTransform(squares, logo), rate_func=linear, run_time=1.5)
self.wait(0.1)
self.play(
text.restore, logo.restore,
rate_func=rush_from, run_time=0.8
)
self.wait(1)
self.remove(bg)
self.play(FadeOut(Group(*self.mobjects))) |
path = "~/.calendars/*"
default_list = "Tasks"
date_format = "%d/%m/%Y"
time_format = "%H:%M"
default_due = 0
|
#!/usr/bin/python
import SocketServer, SimpleHTTPServer, argparse, json
parser = argparse.ArgumentParser()
parser.add_argument('--port', '-p', type=int, default=8008)
parser.add_argument('--log-request-body', action='store_true', default=False)
args = parser.parse_args()
class HTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
__not_found = "<body>Not found</body>"
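    # Decision rules for POSTs to /clientauth (derived from the branches below):
    #   * .../sample1.mp4/playlist.m3u8 -> 200 with return_code 302 redirecting
    #     to the "blocked" content
    #   * .../sample2.mp4/playlist.m3u8 -> 200 with return_code 403
    #   * .../chunks.m3u8 or .../chunk.m3u8 -> 200 with return_code 302 back to
    #     that stream's playlist.m3u8
    #   * User-Agent "BlockMe/1.0" or Referer "http://block.me" -> HTTP 403
    #   * anything else -> 200 with return_code 200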
def do_POST(self):
body = None
content_length = self.headers.getheader('content-length')
if content_length:
body = self.rfile.read(int(content_length))
if self.path == '/clientauth':
if body:
                json_body = json.loads(body) # just check that the json is parseable
if args.log_request_body:
print(body)
stream_url = json_body.get('url', '')
if stream_url == '/local/mp4/sample1.mp4/playlist.m3u8':
redirect_body = '{"return_code":302, "redirect_location":"http://127.0.0.1:8081/content/blocked.mp4/playlist.m3u8"}'
self.send_response(200)
self.send_header('Content-Length', len(redirect_body))
self.end_headers()
self.wfile.write(redirect_body)
elif stream_url == '/local/mp4/sample2.mp4/playlist.m3u8':
response_body = '{"return_code":403}'
self.send_response(200)
self.send_header('Content-Length', len(response_body))
self.end_headers()
self.wfile.write(response_body)
elif stream_url.endswith('/chunks.m3u8') or stream_url.endswith('/chunk.m3u8'):
redirect_location = 'http://' + json_body.get('host') + stream_url[:stream_url.rfind('/') + 1] + 'playlist.m3u8'
print(redirect_location)
redirect_body = '{"return_code":302, "redirect_location":"' + redirect_location + '"}'
self.send_response(200)
self.send_header('Content-Length', len(redirect_body))
self.end_headers()
self.wfile.write(redirect_body)
else:
user_agent = json_body.get('user_agent', None)
if user_agent == "BlockMe/1.0":
self.send_response(403)
self.send_header('Content-Length', 0)
self.end_headers() # it is enough to send 403 with empty body
return
referer = json_body.get('referer', None)
if referer == "http://block.me":
self.send_response(403)
self.send_header('Content-Length', 0)
self.end_headers() # it is enough to send 403 with empty body
return
body = '{"return_code":200}'
self.send_response(200)
self.send_header('Content-Length', len(body))
self.end_headers()
self.wfile.write(body)
else:
self.send_response(403)
self.send_header('Content-Length', 0)
self.end_headers()
else:
self.send_response(404)
self.send_header('Content-Length', len(self.__not_found))
self.end_headers()
self.wfile.write(self.__not_found)
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer(("", args.port), HTTPHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt, e:
pass
finally:
httpd.socket.close()
|
class circle():
def __init__(self,r):
self.radius = r
def circumference(self):
return self.radius*2*3.14
def area(self):
return self.radius**2*3.14
def volume(self):
        return self.radius**3*(4.0/3)*3.14  # 4/3 would be integer division (= 1) under Python 2
x = circle(3)
print x.area()
print x.circumference()
print x.volume()
|
"""
This plugin does not perform ANY test: The aim is to visit all URLs grabbed so far and build
the transaction log to feed data to other plugins
NOTE: This is an active plugin because it may visit URLs retrieved by vulnerability scanner spiders
which may be considered sensitive or include vulnerability probing
"""
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Visit URLs found by other tools, some could be sensitive: need permission"
def run(PluginInfo):
urls = ServiceLocator.get_component("url_manager").GetURLsToVisit()
for url in urls: # This will return only unvisited urls
ServiceLocator.get_component("requester").GetTransaction(True, url) # Use cache if possible
Content = "%s URLs were visited" % str(len(urls))
OWTFLogger.log(Content)
return ServiceLocator.get_component("plugin_helper").HtmlString(Content)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 12 17:39:48 2022
@author: marco
"""
import pandas as pd
import numpy as np
import os
from scipy.linalg import pinv as pinv
from scipy.linalg import inv as inv
from sklearn.preprocessing import PolynomialFeatures
os.chdir('C://Users//marco//Desktop//Projects')
cwd = os.getcwd()
print("Current working directory: {0}".format(cwd))
import warnings # `do not disturbe` mode
warnings.filterwarnings('ignore')
dtafile = 'Data.xlsx'
df = pd.read_excel(dtafile, index_col=0, skiprows=0, na_values=('NE'),sheet_name='Univariate')
def hamilton(df, h, p):
    """Build the Hamilton (2018) regression-filter design matrix: the series
    plus its lags h, ..., h+p-1 and a constant."""
    df = df.copy()
    df1 = pd.DataFrame()  # Support matrix of lagged regressors
    for i in range(h, h + p):
        tmp = df.iloc[:, :].shift(i)
        tmp.columns = tmp.columns + '_' + str(i)
        df1 = pd.concat([df1, tmp], axis=1)
    df1.columns = df1.columns.str.replace("_0", "")
    df1.drop(columns=df.columns[1:], inplace=True)
    df1 = pd.concat([df, df1], axis=1)
    df1['cons'] = 1
    return df1

df2 = hamilton(h=8, p=4, df=df)
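# With quarterly data, h=8 and p=4 (an eight-quarter-ahead projection on four
# lags) is the conventional Hamilton-filter choice, which is what the call
# above uses.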
def hp_filter(df, lam):
    """Hodrick-Prescott-style smoother built from the second-difference
    penalty matrix K.  Returns two frames: df1 uses the constant smoothing
    parameter lam, df2 uses boundary-adjusted weights (3*lam at the first and
    last observations, 1.5*lam at the second and second-to-last ones)."""
    df1 = df.copy()
    df2 = df.copy()
    y = df1.to_numpy()
    n = y.shape[0]
    # Second-difference matrix K of shape (n-2, n): rows of (1, -2, 1).
    k1 = np.eye(n, n)
    k2 = np.eye(n, k=1) * -2
    k3 = np.eye(n, k=2)
    k = k1 + k2 + k3
    k = k[:-2, :]
    I = np.identity(n)
    # Standard HP trend: t1 = (I + lam * K'K)^-1 y
    w1 = lam
    t1 = inv(I + w1 * k.T @ k) @ y
    # Boundary-adjusted smoothing weights
    lamb = np.full(shape=n, fill_value=lam, dtype=float)
    lamb[0] = lam * 3
    lamb[-1] = lam * 3
    lamb[1] = lam * (3 / 2)
    lamb[-2] = lam * (3 / 2)
    w2 = lamb.reshape(-1, 1)
    t2 = inv(I + w2 * k.T @ k) @ y
    df1['trend'] = t1
    df1['cycle'] = df1['y'] - df1['trend']
    df2['trend_esc'] = t2
    df2['cycle_esc'] = df2['y'] - df2['trend_esc']
    return (df1, df2) |
from django.db import models, transaction
from projects.models import Project
from organizations.models import Organization
class WorkstreamType(models.Model):
name = models.CharField(max_length=50)
organization = models.ForeignKey(Organization, null=True, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Workstream(models.Model):
"""
Abstract base class
"""
name = models.CharField(max_length=50)
description = models.CharField(max_length=50, null=True, blank=True)
objective = models.CharField(max_length=50, blank=True)
motivation = models.CharField(max_length=50, blank=True)
owner = models.CharField(max_length=50, blank=True)
timestamp = models.DateTimeField(auto_now_add=True, null=True)
category = models.ForeignKey(WorkstreamType, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
is_the_reference_workstream = models.BooleanField(default=False)
copied_from = models.ForeignKey('self', null=True, blank=True, on_delete=models.CASCADE,
related_name='copied_from_set')
def __str__(self):
return self.name
@property
def augmented_name(self):
return ''.join([self.name, ' (', self.project.name, ')'])
@property
def augmented_name2(self):
return ''.join([self.name, ' (', self.project.name, ')'])
def save(self, *args, **kwargs):
if not self.is_the_reference_workstream:
return super(Workstream, self).save(*args, **kwargs)
with transaction.atomic():
# make sure that no other workstreams of this type are tagged as reference workstream
# to prevent any problems related to this, make sure that is_the_reference_workstream isn't shown in any
# forms available to users.
Workstream.objects.filter(
is_the_reference_workstream=True, category=self.category).update(is_the_reference_workstream=False)
return super(Workstream, self).save(*args, **kwargs)
|
import os
import reprint
from fields.map import Map
from towers.tower import tower_dict, get_tower_info
from waves.monsters import PassedTheGateError
from waves.wave import EasyWave, HeavyWave
class Game:
def __init__(self):
self.gold = 100
self.lives = 30
self.points = 0
self.map = Map()
self.waves = [EasyWave(), HeavyWave()]
def build(self):
def print_current():
os.system("clear")
print("Buliding phase")
self.map.print_with_monits([f"Available gold: {self.gold}",
f"Lives: {self.lives}",
f"Points: {self.points}",
f"Towers price: {get_tower_info()}"])
while True:
print_current()
if input("Want to buy? y/n ") == "n":
break
tower_selection = input("Enter tower name ")
try:
tower = tower_dict[tower_selection.upper()]()
except KeyError:
print("Wrong letter")
continue
price = tower.price
if price > self.gold:
print("Not enough gold")
continue
while True:
row = int(input("Enter row number "))
col = int(input("Enter col number "))
try:
self.map.build_tower(tower, (row, col))
self.gold -= price
break
except ValueError:
print("This field is unavailable")
continue
print_current()
input("Press enter to start the battle")
def fight(self):
def get_current_status():
monsters_hp = []
for monster in self.map.monsters:
monsters_hp.append(str(monster.hp))
monsters_hp = " ".join(monsters_hp)
return f" Monsters hp: {monsters_hp}"
wave = self.waves.pop(0)
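        # reprint redraws the battle view in place on every tick instead of
        # scrolling the terminal.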
with reprint.output(output_type="dict", interval=0) as output_dict:
while True:
self.update()
wave.release(self.map)
wave.update()
rows = self.map.get_rows()
rows[0] += get_current_status()
rows[1] += f" Lives: {self.lives}"
for i, row in enumerate(rows):
output_dict[i] = "{}".format("".join(row))
if not self.map.monsters:
break
if self.lives == 0:
break
def update(self):
for _, field in self.map.wall.items():
if field.objects:
tower = field.objects[0]
tower.update()
for monster in self.map.monsters:
monster.update()
if monster.is_alive:
try:
monster.move()
except PassedTheGateError:
self.map.remove_monster(monster)
self.lives -= 1
else:
self.gold += monster.gold
self.points += monster.points
self.map.remove_monster(monster)
def run(self):
while self.waves:
self.build()
os.system("clear")
self.fight()
if __name__ == '__main__':
game = Game()
game.run()
|
from typing import Optional
from pydantic import BaseSettings, Field
class Config(BaseSettings):
ENVIRONMENT: str = Field(None, env="ENVIRONMENT")
JWT_SECRET: Optional[str]
DATA_HOST: Optional[str]
TPV: Optional[str]
TPV_USER: Optional[str]
TPV_CORE: Optional[str]
class Config:
env_file: str = "config.env"
class DevConfig(Config):
class Config:
env_prefix: str = "DEV_"
class ProdConfig(Config):
class Config:
env_prefix: str = "PROD_"
def get_config():
if "prod" == Config().ENVIRONMENT:
return ProdConfig()
else:
return DevConfig()
config = get_config()
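# Example config.env (illustrative values only):
#   ENVIRONMENT=prod
#   PROD_JWT_SECRET=change-me
#   DEV_JWT_SECRET=dev-secret
# ENVIRONMENT is read without a prefix (explicit env= above); the remaining
# fields are looked up with the DEV_/PROD_ prefix of the selected config class.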
|
# this function returns the price with a 5% discount (multiplies by 0.95)
def discount(price):
result = 0.95*(price)
# print("price=%f, result=%f", price, result)
return result |
from __future__ import print_function
from collections import deque
class TrainEarlyStopOnLosses(object):
"""Estimate if early stop conditions on losses fullfiled
"""
def __init__(self, stop_conditions, max_mem, stop_on_all_conditions):
"""Initialize the TrainEarlyStopOnLosses( class
Parameters:
stop_conditions (dict) - defintion of stops conditions with entries name_of_loss : max_change or name_of_Loss:[min_value,max_value]
max_mem (int) - number of previous losses considered
stop_on_all_conditions - if stop is on all conditions fullfilled (otherwise on any)
"""
self.stop_conditions = stop_conditions
self.max_mem = max_mem
self.stop_on_all_conditions = stop_on_all_conditions
self.recent_losses = dict()
# check early stop, storage losses
def check(self, model):
"""checks if early stop
Parameters:
model
Returns:
boolean
True if early stop conditions fullfilled
"""
early_stop_flag = False
if len(self.stop_conditions) > 0:
losses = model.get_current_losses()
# check on start if properly configured
if len(self.recent_losses) == 0:
                self.__check_config(losses)
stop_on_losses = []
for loss in self.stop_conditions:
stop_on_losses.append(False)
if loss not in self.recent_losses:
self.recent_losses[loss] = deque([], self.max_mem)
self.recent_losses[loss].append(losses[loss])
if len(self.recent_losses[loss]) >= self.max_mem:
                    # smallest and largest of the recent losses
                    min_mem_loss = min(self.recent_losses[loss])
                    max_mem_loss = max(self.recent_losses[loss])
# min max loss given
if isinstance(self.stop_conditions[loss], list) or isinstance(self.stop_conditions[loss], tuple):
if min_mem_loss > self.stop_conditions[loss][0] and max_mem_loss < self.stop_conditions[loss][1]:
stop_on_losses[-1] = True
# max diff given
if isinstance(self.stop_conditions[loss], float):
if abs(max_mem_loss - min_mem_loss) <= 2 * self.stop_conditions[loss]:
stop_on_losses[-1] = True
if self.stop_on_all_conditions:
if all(stop_on_losses):
print('all stop criteria fulfilled : stop')
early_stop_flag = True
else:
if any(stop_on_losses):
print('at least one stop criterion fulfilled : stop')
early_stop_flag = True
return early_stop_flag
# early stop config sanity check
def __check_config(self, losses):
if len(self.stop_conditions) > 0:
# early stop sanity check of conditions
if self.max_mem <= 0:
raise Exception("stop condition needs at least one prev data")
for loss in self.stop_conditions:
if loss not in losses:
raise Exception("unrecognized stop condition : %s" % format(loss, ))
# min max
if isinstance(self.stop_conditions[loss], list) or isinstance(self.stop_conditions[loss],
tuple):
if self.stop_conditions[loss][0] >= self.stop_conditions[loss][1]:
raise Exception(
"wrong stop condition : %s bounds must be in order and not equal" % format(loss, ))
if self.stop_conditions[loss][1] <= 0:
raise Exception(
"wrong stop condition : %s upper bound cannot be negative or zero" % format(loss, ))
# diff
if isinstance(self.stop_conditions[loss], float):
if self.stop_conditions[loss] <= 0:
raise Exception("wrong stop condition : %s cannot be negative or zero" % format(loss, ))
|
#!/usr/bin/env python3
#
# huecon.py
#
# Interactive command line Hue console
#
import argparse
import os
import sys
import config
import hue
import cli
# Config file location
CONFIG_FILE = os.path.expanduser("~/.huecon")
CLI_HISTORY_FILE = os.path.expanduser("~/.huecon_history")
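# CLI definition tree: each key is "keyword:help text", a "None:..." entry
# describes the bare command, "<...>" keys are argument placeholders resolved
# via the arg_defs built in HueCon.__init__, and each leaf string names the
# HueCon method that handles the command.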
CLI_DEF = {
"show|:Show various Hue system info": {
"lights:Show the lights": {
"None:Show summary": "show_lights",
"detail:Show detail": "show_lights_detail",
"name:Show specific light by name": {
"<light-name>:Show light with this name": "show_light",
},
"id:Show specific light by id": {
"<light-id>:Show light with this id": "show_light",
},
},
"scenes:Show the scenes": {
"None:Show summary": "show_scenes",
"detail:Show detail": "show_scenes",
"name:Show specific scene by name": {
"<scene-name>:Show scene with this name": "show_scene",
},
"id:Show specific scene by id": {
"<scene-id>:Show scene with this id": "show_scene",
},
},
"resourcelinks:Show the resourcelinks": {
"None:Show summary": "show_resourcelinks",
"name:Show specific resourcelink by name": {
"<rlink-name>:Show resourcelink with this name":
"show_resourcelink",
},
"id:Show specific resourcelink by id": {
"<rlink-id>:Show resourcelinl with this id":
"show_resourcelink",
},
},
"groups:Show the groups": {
"None:Show summary": "show_groups",
"name:Show specific group by name": {
"<group-name>:Show group with this name": "show_group",
},
"id:Show specific group by id": {
"<group-id>:Show group with this id": "show_group",
},
},
"sensors:Show the sensors": {
"None:Show summary": "show_sensors",
"name:Show specific sensor by name": {
"<sensor-name>:Show sensor with this name": "show_sensor",
},
"id:Show specific sensor by id": {
"<sensor-id>:Show sensor with this id": "show_sensor",
},
},
"rules:Show the rules": {
"None:Show summary": "show_rules",
"name:Show specific rule by name": {
"<rule-name>:Show rule with this name": "show_rule",
},
"id:Show specific rule by id": {
"<rule-id>:Show rule with this id": "show_rule",
},
},
"schedules:Show the schedules": {
"None:Show summary": "show_schedules",
"name:Show specific schedule by name": {
"<sched-name>:Show schedule with this name": "show_schedule",
},
"id:Show specific schedule by id": {
"<sched-id>:Show schedule with this id": "show_schedule",
},
},
"whitelist:Show the whitelist of users": "show_whitelist",
},
"light:Perform actions for a light": {
"id:Perform action for a light id": {
"<light-id>:Perform action for this light id": {
"on:Turn light on": "light_on",
"off:Turn light off": "light_off",
},
},
"name:Perform action for a light name": {
"<light-name>:Perform action for this light name": {
"on:Turn light on": "light_on",
"off:Turn light off": "light_off",
},
},
},
"group:Perform actions for a group": {
"id:Perform action for a group id": {
"<group-id>:Perform action for this group id": {
"on:Turn group on": "group_on",
"off:Turn group off": "group_off",
},
},
"name:Perform action for a group name": {
"<group-name>:Perform action for this group name": {
"on:Turn group on": "group_on",
"off:Turn group off": "group_off",
},
},
},
"exit:Exit Huecon": "do_exit",
}
BANNER = """
_ _ ____
| | | | _ _ ___ / ___| ___ _ __
| |_| || | | | / _ \| | / _ \ | '_ \\
| _ || |_| || __/| |___| (_) || | | |
|_| |_| \__,_| \___| \____|\___/ |_| |_|
"""
def exit_error(message):
print(message)
sys.exit(1)
class ObjectIDArg(cli.ArgumentDef):
def __init__(self, get_fn, arg_name):
self.get_fn = get_fn
super().__init__(arg_name + "-id", arg_name)
def complete(self, ctx, arg):
return [o.id for o in self.get_fn()
if o.id.startswith(arg)]
def process(self, ctx, arg):
# Find the object!
objects = {o.id: o for o in self.get_fn()}
try:
return objects[arg]
except KeyError as exc:
raise cli.ArgumentError("Unknown ID".format(self.name)) from exc
def help_options(self, ctx):
return [(o.id, o.name) for o in self.get_fn()]
class ObjectNameArg(cli.ArgumentDef):
def __init__(self, get_fn, arg_name):
self.get_fn = get_fn
super().__init__(arg_name + "-name", arg_name)
def splitline(self, ctx, arg):
# If there are quotes, then walk till we find last
if arg and arg[0] == '"':
if '" ' not in arg[1:]:
return arg, None
else:
end = arg[1:].index('" ')
return arg[:end + 2], arg[end + 2:].lstrip()
else:
return super().splitline(ctx, arg)
def complete(self, ctx, arg):
return ['"{}"'.format(o.name)
for o in self.get_fn()
if '"{}"'.format(o.name).startswith(arg)]
def process(self, ctx, arg):
# Find the object!
objects = {o.name: o for o in self.get_fn()}
# Strip quotes
if len(arg) > 1 and arg[0] == '"' and arg[-1] == '"':
arg = arg[1:-1]
try:
return objects[arg]
except KeyError as exc:
raise cli.ArgumentError("Unknown name".format(self.name),
arg) from exc
class HueCon(cli.Interface):
intro = BANNER + '\n\nType help or ? to list commands.\n'
prompt = '(huecon) '
def __init__(self, bridge_address=None):
# Load config file
self.config_file = config.Config(CONFIG_FILE)
# Connect to bridge
self.bridge = self._connect_to_bridge(bridge_address)
# Create argument definitions
arg_defs = {}
for name, arg_name in (("light", None),
("scene", None),
("group", None),
("sensor", None),
("rule", None),
("schedule", "sched"),
("resourcelink", "rlink")):
if arg_name is None:
arg_name = name
func = getattr(self.bridge, "get_{}s".format(name))
arg_defs["<{}-id>".format(arg_name)] = ObjectIDArg(func, name)
arg_defs["<{}-name>".format(arg_name)] = ObjectNameArg(func, name)
super().__init__(CLI_DEF, arg_defs, CLI_HISTORY_FILE)
# ------------------------------------------------------------------------
# Utils
# ------------------------------------------------------------------------
def _connect_to_bridge(self, bridge_address):
# Get known bridges
known_bridges = {bid: user
for bid, user in self.config_file.get_bridges()}
if bridge_address is None:
address = input("Enter hue bridge host: ")
else:
address = bridge_address
# Create a bridge
try:
bridge = hue.Bridge(address)
except hue.Error as exc:
exit_error("Bad bridge address: {!s}".format(exc))
print("Connected to bridge '{}'".format(bridge.name))
if bridge.id not in known_bridges:
print("Bridge not known, registering with bridge")
input("Press bridge button then press enter to continue...")
try:
username = bridge.register("huecon")
except hue.Error as exc:
exit_error("Failed to register with bridge: {!s}".format(exc))
# Update config
self.config_file.add_bridge(bridge.id, username)
self.config_file.write_file()
else:
username = known_bridges[bridge.id]
print("Logging in...")
try:
bridge.auth(username)
except hue.Error as exc:
exit_error("Failed to connect to bridge: {!s}".format(exc))
return bridge
def _print_light(self, light):
print(light.name)
print(" ID:", light.id)
print(" Reachable:", bool_str(light.is_reachable))
print(" On:", bool_str(light.is_on))
print(" Brightness: {}%".format(light.state.bri * 100
/ hue.common.MAX_BRIGHTNESS))
print(" Hue:", light.state.hue)
print(" Saturation:", light.state.sat)
print(" Effect:", light.state.effect)
def _print_scene(self, scene):
print(scene.name)
print(" ID: {}".format(scene.id))
print(" Lights:")
print(" " + "\n ".join(l.name for l in scene.lights))
print(" Last updated: {!s}".format(scene.last_updated))
print(" Recycle: {}".format(bool_str(scene.recycle)))
print(" Locked: {}".format(bool_str(scene.locked)))
print(" Owner: {}".format(scene.owner))
# ------------------------------------------------------------------------
# Action functions
# ------------------------------------------------------------------------
def show_lights(self, ctx):
print("Lights:")
for light in self.bridge.get_lights():
print(" {} (state: {}, id: {})".format(light.name,
light.state_str,
light.id))
def show_light(self, ctx):
self._print_light(ctx.args['light'])
def show_lights_detail(self, ctx):
print("Detailed lights info")
for light in self.bridge.get_lights():
print("")
self._print_light(light)
def light_on(self, ctx):
light = ctx.args["light"]
print("Turning light '{}' on".format(light.name))
light.turn_on()
def light_off(self, ctx):
light = ctx.args["light"]
print("Turning light '{}' off".format(light.name))
light.turn_off()
def show_scenes(self, ctx):
print("Scenes:")
scenes = self.bridge.get_scenes()
maxlen = max(len(s.name) for s in scenes)
for scene in scenes:
if "detail" in ctx.kws:
print("")
self._print_scene(scene)
else:
print(" {:{}} (id: {})".format(scene.name, maxlen, scene.id))
def show_scene(self, ctx):
self._print_scene(ctx.args['scene'])
def show_resourcelinks(self, ctx):
print("Resourcelinks:")
for rlink in self.bridge.get_resourcelinks():
print(" {} (id: {})".format(rlink.name, rlink.id))
print(" '{}'".format(rlink.description))
def show_resourcelink(self, ctx):
rlink = ctx.args['resourcelink']
print(rlink.name)
print(" Description: {}".format(rlink.description))
print(" ID: {}".format(rlink.id))
print(" Recycle: {}".format(bool_str(rlink.recycle)))
print(" Links:")
objects = rlink.links
maxlen = max(len(type(obj).__name__) + len(obj.name) + 3
for obj in objects)
for obj in objects:
name = "{} '{}'".format(type(obj).__name__,
obj.name)
print(" {:{}} (id: {})".format(name, maxlen, obj.id))
def show_groups(self, ctx):
print("Groups:")
groups = self.bridge.get_groups()
maxlen = max(len(group.name) for group in groups)
for group in groups:
print(" {:{}} (state: {}, type: {}, id: {})"
.format(group.name, maxlen, group.state_str,
group.type, group.id))
def show_group(self, ctx):
group = ctx.args['group']
print(group.name)
print(" ID: {}".format(group.id))
print(" Type: {}".format(group.type))
print(" Class: {}".format(group.group_class))
print(" State: {}".format(group.state_str))
print(" Recycle: {}".format(bool_str(group.recycle)))
print(" Lights:")
for light in group.lights:
print(" {} ({})".format(light.name, light.state_str))
def show_sensors(self, ctx):
print("Sensors:")
sensors = self.bridge.get_sensors()
maxlen = max(len(sensor.name) for sensor in sensors)
for sensor in sensors:
print(" {:{}} (type: {}, state: {}, id: {})"
.format(sensor.name, maxlen,
sensor.type_str,
sensor.state_str, sensor.id))
def show_sensor(self, ctx):
sensor = ctx.args['sensor']
print(sensor.name)
print(" ID: {}".format(sensor.id))
print(" Type: {}".format(sensor.type_str))
print(" State: {}".format(sensor.state_str))
print(" Last updated: {!s}".format(sensor.last_updated))
print(" Recycle: {}".format(bool_str(sensor.recycle)))
def show_rules(self, ctx):
print("Rules:")
rules = self.bridge.get_rules()
maxlen = max(len(rule.name) for rule in rules)
for rule in rules:
print(" {:{}} (id: {})"
.format(rule.name, maxlen, rule.id))
def show_rule(self, ctx):
rule = ctx.args['rule']
print(rule.name)
print(" ID: {}".format(rule.id))
print(" Status: {}".format(rule.status))
print(" Owner: {}".format(rule.owner))
print(" Last triggered: {!s}".format(rule.last_triggered))
print(" Times triggered: {}".format(rule.times_triggered))
print(" Conditions:")
for cond in rule.conditions:
print(" {!s}".format(cond))
print(" Actions:")
for act in rule.actions:
print(" {!s}".format(act))
def show_schedules(self, ctx):
print("Schedules:")
scheds = self.bridge.get_schedules()
maxlen = max(len(sched.name) for sched in scheds)
for sched in scheds:
print(" {:{}} (id: {}, enabled: {})"
.format(sched.name, maxlen, sched.id,
bool_str(sched.is_enabled)))
def show_schedule(self, ctx):
sched = ctx.args['schedule']
print(sched.name)
print(" ID: {}".format(sched.id))
print(" Enabled: {}".format(bool_str(sched.is_enabled)))
print(" Timer time: {!s}".format(sched.timer_time))
print(" Created: {!s}".format(sched.created_time))
print(" Start time: {!s}".format(sched.start_time))
print(" Command:\n {!s}".format(sched.command_action))
print(" Auto-delete: {}".format(sched.auto_delete))
print(" Recycle: {}".format(sched.recycle))
def show_whitelist(self, ctx):
print("User whitelist (ordered by last used):")
for user in sorted(self.bridge.get_whitelist().values(),
key=lambda u: u.last_used.datetime,
reverse=True):
print(" {}".format(user.name))
print(" Created: {!s}, Last used: {!s}"
.format(user.created, user.last_used))
def group_on(self, ctx):
group = ctx.args["group"]
print("Turning group '{}' on".format(group.name))
group.turn_on()
def group_off(self, ctx):
group = ctx.args["group"]
print("Turning group '{}' off".format(group.name))
group.turn_off()
def do_exit(self, ctx):
print("Bye!")
ctx.end = True
def bool_str(val):
if val is None:
return "--"
elif val:
return "Yes"
else:
return "No"
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Interactive console for managing Hue lights")
parser.add_argument("-b", "--bridge", help="Connect to this bridge")
args = parser.parse_args()
HueCon(args.bridge)
|
"""Train dann."""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from core.test import test
from core.test_weight import test_weight
from utils.utils import save_model
import torch.backends.cudnn as cudnn
import math
cudnn.benchmark = True
def weight(ten, a=10):
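    # Smooth monotone ramp from 0 at ten=0 to 1 at ten=1, with weight(0.5)=0.5;
    # a larger `a` sharpens the transition toward a hard threshold at 0.5.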
a = torch.tensor(a, device=ten.device)
return (torch.atan(a*(ten-0.5)) +
torch.atan(0.5*a))/(2*torch.atan(a*0.5))
def lipton_weight(ten, beta = 4):
order = torch.argsort(ten)
return (order < len(ten)/(1+beta)).float()
def get_quantile(ten, a = 0.5):
return torch.kthvalue(ten,math.floor(len(ten)*a))[0]
def train_dann(model, params, src_data_loader, tgt_data_loader, src_data_loader_eval, tgt_data_loader_eval, num_src, num_tgt, device, logger):
"""Train dann."""
####################
# 1. setup network #
####################
# setup criterion and optimizer
if not params.finetune_flag:
print("training non-office task")
# optimizer = optim.SGD(model.parameters(), lr=params.lr, momentum=params.momentum, weight_decay=params.weight_decay)
optimizer = optim.Adam(model.parameters(), lr=params.lr)
else:
print("training office task")
parameter_list = [{
"params": model.features.parameters(),
"lr": 0.001
}, {
"params": model.fc.parameters(),
"lr": 0.001
}, {
"params": model.bottleneck.parameters()
}, {
"params": model.classifier.parameters()
}, {
"params": model.discriminator.parameters()
}]
optimizer = optim.SGD(parameter_list, lr=0.01, momentum=0.9)
criterion0 = nn.CrossEntropyLoss(reduction = 'mean')
criterion = nn.CrossEntropyLoss(reduction = 'none')
weight_src = torch.ones(num_src).to(device)
weight_tgt = torch.ones(num_tgt).to(device)
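    # Per-sample importance weights for the domain loss, indexed by dataset
    # position (idx_src / idx_tgt) and refreshed on every step from the
    # domain discriminator's output probabilities.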
####################
# 2. train network #
####################
global_step = 0
for epoch in range(params.num_epochs):
# set train state for Dropout and BN layers
model.train()
# zip source and target data pair
len_dataloader = min(len(src_data_loader), len(tgt_data_loader))
data_zip = enumerate(zip(src_data_loader, tgt_data_loader))
for step, ((images_src, class_src, idx_src), (images_tgt, _, idx_tgt)) in data_zip:
p = float(step + epoch * len_dataloader) / \
params.num_epochs / len_dataloader
alpha = 2. / (1. + np.exp(-10 * p)) - 1
# if params.lr_adjust_flag == 'simple':
# lr = adjust_learning_rate(optimizer, p)
# else:
# lr = adjust_learning_rate_office(optimizer, p)
# logger.add_scalar('lr', lr, global_step)
# prepare domain label
size_src = len(images_src)
size_tgt = len(images_tgt)
label_src = torch.zeros(size_src).long().to(device) # source 0
label_tgt = torch.ones(size_tgt).long().to(device) # target 1
# make images variable
class_src = class_src.to(device)
images_src = images_src.to(device)
images_tgt = images_tgt.to(device)
# zero gradients for optimizer
optimizer.zero_grad()
# train on source domain
src_class_output, src_domain_output = model(input_data=images_src, alpha=alpha)
src_loss_class = criterion0(src_class_output, class_src)
if params.run_mode in [0,2]:
src_loss_domain = criterion0(src_domain_output, label_src)
else:
src_loss_domain = criterion(src_domain_output, label_src)
prob = torch.softmax(src_domain_output.data, dim = -1)
if params.soft:
if params.quantile:
weight_src[idx_src] = (torch.sort(prob[:,1])[1]).float().detach()
else:
weight_src[idx_src] = weight(prob[:,1]).detach()
else:
if params.quantile:
weight_src[idx_src] = (prob[:,0] < \
get_quantile(prob[:,0],params.threshold[0])).float().detach()
else:
weight_src[idx_src] = (prob[:,0] < params.threshold[0]).float().detach()
src_loss_domain = torch.dot(weight_src[idx_src], src_loss_domain
)/ torch.sum(weight_src[idx_src])
#train on target domain
_, tgt_domain_output = model(input_data=images_tgt, alpha=alpha)
if params.run_mode in [0,1]:
tgt_loss_domain = criterion0(tgt_domain_output, label_tgt)
else:
tgt_loss_domain = criterion(tgt_domain_output, label_tgt)
prob = torch.softmax(tgt_domain_output.data, dim = -1)
if params.soft:
if params.quantile:
weight_tgt[idx_tgt] = (torch.sort(prob[:,0])[1]).float().detach()
else:
weight_tgt[idx_tgt] = weight(prob[:,0]).detach()
else:
if params.quantile:
weight_tgt[idx_tgt] = (prob[:,1] < \
get_quantile(prob[:,1],params.threshold[1])).float().detach()
else:
weight_tgt[idx_tgt] = (prob[:,1] < params.threshold[1]).float().detach()
tgt_loss_domain = torch.dot(weight_tgt[idx_tgt], tgt_loss_domain
) / torch.sum(weight_tgt[idx_tgt])
loss = src_loss_class + src_loss_domain + tgt_loss_domain
if params.src_only_flag:
loss = src_loss_class
# optimize dann
loss.backward()
optimizer.step()
global_step += 1
# print step info
logger.add_scalar('src_loss_class', src_loss_class.item(), global_step)
logger.add_scalar('src_loss_domain', src_loss_domain.item(), global_step)
logger.add_scalar('tgt_loss_domain', tgt_loss_domain.item(), global_step)
logger.add_scalar('loss', loss.item(), global_step)
if ((step + 1) % params.log_step == 0):
print(
"Epoch [{:4d}/{}] Step [{:2d}/{}]: src_loss_class={:.6f}, src_loss_domain={:.6f}, tgt_loss_domain={:.6f}, loss={:.6f}"
.format(epoch + 1, params.num_epochs, step + 1, len_dataloader, src_loss_class.data.item(),
src_loss_domain.data.item(), tgt_loss_domain.data.item(), loss.data.item()))
# eval model
if ((epoch + 1) % params.eval_step == 0):
src_test_loss, src_acc, src_acc_domain = test_weight(model, src_data_loader_eval, device, flag='source')
tgt_test_loss, tgt_acc, tgt_acc_domain = test_weight(model, tgt_data_loader_eval, device, flag='target')
logger.add_scalar('src_test_loss', src_test_loss, global_step)
logger.add_scalar('src_acc', src_acc, global_step)
logger.add_scalar('src_acc_domain', src_acc_domain, global_step)
logger.add_scalar('tgt_test_loss', tgt_test_loss, global_step)
logger.add_scalar('tgt_acc', tgt_acc, global_step)
logger.add_scalar('tgt_acc_domain', tgt_acc_domain, global_step)
# save model parameters
if ((epoch + 1) % params.save_step == 0):
save_model(model, params.model_root,
params.src_dataset + '-' + params.tgt_dataset + "-dann-{}.pt".format(epoch + 1))
# save final model
save_model(model, params.model_root, params.src_dataset + '-' + params.tgt_dataset + "-dann-final.pt")
return model
def adjust_learning_rate(optimizer, p):
lr_0 = 0.01
alpha = 10
beta = 0.75
lr = lr_0 / (1 + alpha * p)**beta
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def adjust_learning_rate_office(optimizer, p):
lr_0 = 0.001
alpha = 10
beta = 0.75
lr = lr_0 / (1 + alpha * p)**beta
for param_group in optimizer.param_groups[:2]:
param_group['lr'] = lr
for param_group in optimizer.param_groups[2:]:
param_group['lr'] = 10 * lr
return lr
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Provide stub objects that can act as stand-in "dummy" datasets for simple use
cases, like getting all classes in a dataset. This exists so that demos can be
run without requiring users to download/install datasets first.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from utils.collections import AttrDict
def get_coco_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def get_tt100k_dataset():
ds = AttrDict()
classes = [
'__background__', 'pl40', 'p26', 'p27', 'p5', 'ip', 'pl30', 'pn', 'w30',
'p11', 'pl5', 'wo', 'io', 'po', 'i4', 'pl70', 'pl80', 'pl50', 'ph4', 'pl100',
'il80', 'il70', 'il60', 'pne', 'i2', 'pg', 'p17', 'p12', 'p22', 'pl60',
'pm30', 'pl120', 'il110', 'il90', 'p10', 'w57', 'w55', 'ph4.5', 'w13',
'pl20', 'w59', 'i5', 'w63', 'p16', 'w32', 'pb', 'pl110', 'il100', 'ph5',
'p3', 'w58', 'ph2', 'pm55', 'p19', 'pl25', 'pm20', 'pr40', 'ph3.5', 'p18',
'w3', 'p8', 'ps', 'ph2.8', 'w12', 'pa14', 'p6', 'p9', 'p23', 'ph3', 'w47',
'il50', 'pr30', 'w37', 'w46', 'pm35', 'pr100', 'i10', 'pl15', 'w34', 'i13',
'pl10', 'p1', 'i12', 'pm2', 'pl90', 'pm10', 'pr20', 'pm40', 'w16', 'w15', 'i3',
'ph2.5', 'p15', 'pm8', 'pa12', 'w21', 'pa13', 'pr50', 'p13', 'pa10', 'ph2.2',
'ph4.2', 'pm5', 'i1', 'pr60', 'w42', 'pw3.2', 'p21', 'p25', 'pr70', 'w22',
'w10', 'p4', 'p14', 'pm13', 'pw4.2', 'pm50', 'w35', 'pl0', 'p2', 'w45', 'w8',
'w41', 'pl35', 'ph4.3', 'ph3.2', 'p20', 'pa8', 'ph2.1', 'pr80', 'pm15', 'i11',
'w20', 'i14', 'ph4.8', 'ph1.5', 'ph2.9', 'w18', 'w5', 'w38', 'pr10', 'pw2',
'pw3', 'pw4.5', 'p28', 'ph5.3', 'pw2.5', 'pw4', 'ph2.4', 'pw3.5', 'w66',
'p24', 'pc', 'pl4', 'pm1.5', 'ph3.3', 'w43', 'w31', 'ph5.5', 'pm46',
'pm25', 'w24', 'w48', 'w50', 'w26', 'w60', 'ph4.4', 'w49', 'ph2.6',
'i15', 'p7', 'pn40', 'pl65', 'w1', 'w62', 'w44']
ds.classes = {i: name for i, name in enumerate(classes)}
return ds |
import email
@then(u'my details should match')
def step_impl(context):
context.execute_steps(u'''
Then I should see "$first_name"
And I should see "$last_name"
And I should see "$contact_number"
And I should see "$email"
''')
@when(u'I confirm and submit my plea')
def step_impl(context):
context.execute_steps(u'''
When I check "understand"
And I press "Make your pleas"
''')
@then(u'I should see the confirmation page')
def step_impl(context):
context.execute_steps(u'''
Then the browser's URL should be "plea/complete/"
And I should see "Your pleas have been sent to the magistrate"
And I should see "Your URN: $urn"
''')
@then(u'I should receive the confirmation email')
def step_impl(context):
context.execute_steps(u'''
Then I should receive an email at "$email" with subject "Online plea submission confirmation"
# And I should receive an email at "$email" containing "Your online pleas have been submitted"
''')
# the above behaving step is failing in library code
persona = context.persona
messages = context.mail.messages_for_user(persona['email'])
text = str(messages[0])
assert 'Your online pleas have been submitted' in text
assert 'Your URN: {}'.format(persona['urn']) in text
@then(u'police should receive confirmation email')
def step_impl(context):
context.execute_steps(u'''
Then I should receive an email at "[email protected]" with subject "POLICE ONLINE PLEA: 00/FF/12345/60 <SJP> SMITH John"
''')
@then(u'the court should receive my plea email with attached details')
def step_impl(context):
persona = context.persona
name = "{} {}".format(persona['last_name'].upper(), persona['first_name'])
context.execute_steps(u'''
Then I should receive an email at "[email protected]" with subject "ONLINE PLEA: $urn <SJP> %s"
And I should receive an email at "[email protected]" with attachment "plea.html"
''' % name)
messages = context.mail.messages_for_user('[email protected]')
attachment = email.message_from_string(messages[0]).get_payload(1)
text = str(attachment)
assert persona['first_name'] in text
assert persona['last_name'] in text
assert persona['email'] in text
assert persona['contact_number'] in text
assert persona['urn'] in text
assert 'Charge 1' in text
assert 'Charge 2' in text
assert 'Your employment status' in text
assert 'Total weekly income' in text
|
#!/usr/bin/python
'''
'''
from ansible.module_utils.basic import AnsibleModule
from Crypto.Cipher import AES
#from Crypto.Util.Padding import pad, unpad
from Crypto import Random
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
import base64, os, random, tarfile, struct
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'stratio'
}
class FileCryptException(Exception):
"""docstring for FileCryptException"""
def __init__(self, msg, exception_type):
super(FileCryptException, self).__init__()
self.msg = msg
self.exception_type = exception_type
DOCUMENTATION = '''
---
module: file_crypt
short_description: Encrypts or decrypts a file
version_added: "2.8"
description:
- "This module allows users encrypt or decrypt a file using RSA key files"
options:
src:
description:
- Source file to encrypt or decrypt
required: true
dest:
description:
- Optional destination path. Default is source path with a '.crypt' suffix.
required: false
op:
description:
- "encrypt" / "decrypt".
required: true
rm_src:
description:
- If true, this module will delete the source file once the operation is finished. Default is false
required: false
rsa_key_raw:
description:
- Raw public/private key to encrypt/decrypt the file.
        required: false
rsa_key_path:
description:
- Path to the public/private key to encrypt/decrypt the file.
        required: false
author:
- Viktor Jacynycz ([email protected])
'''
EXAMPLES = '''
file_crypt:
src: /workspace/my_big_file.data
    op: encrypt
rm_src: no
rsa_key_raw: "{{ lookup('file', key_dir + '/public_key.pem') }}"
file_crypt:
src: /workspace/my_big_file.data
dest: /workspace/filencrypted.crypt
  op: encrypt
rm_src: yes
rsa_key_path: "/tmp/rsa_keys/public_key.pem"
file_crypt:
src: /workspace/my_big_file.data.crypt
  op: decrypt
rsa_key_path: "{{ lookup('file', key_dir + '/private_key.pem') }}"
'''
RETURN = '''
cryptlog:
description: Log text about the operation
type: str
returned: always
'''
def run_module():
'''Main module'''
module_args = dict(
src=dict(type='str', required=True),
dest=dict(type='str', required=False, default=''),
op=dict(type='str', required=True),
rm_src=dict(type='bool', required=False, default=False),
rsa_key_raw=dict(type='str', required=False, default=''),
rsa_key_path=dict(type='str', required=False, default='')
)
result = dict(
changed=False,
log=[]
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
error = dict(
msg='',
error_type=''
)
if module.check_mode:
module.exit_json(**result)
_src = module.params['src']
_dest = module.params['dest']
_op = module.params['op']
_rm_src = module.params['rm_src']
_rsa_key_raw = module.params['rsa_key_raw']
_rsa_key_path = module.params['rsa_key_path']
log = []
try:
key = load_key(_rsa_key_raw, _rsa_key_path, _op)
if _op == 'encrypt':
encrypt_operation(key, _src, _dest, _rm_src, log)
elif _op == 'decrypt':
decrypt_operation(key, _src, _dest, _rm_src, log)
else:
raise FileCryptException("Parameter 'op' must be ['encrypt','decrypt']",
"Wrong operation")
except FileCryptException as snake_case_error:
error['msg'] = snake_case_error.msg
error['error_type'] = snake_case_error.exception_type
module.fail_json(**error)
result['log'] = log
module.exit_json(**result)
def load_key(rsa_key_raw, rsa_key_path, _op):
try:
        # Load the RSA key either from the raw value or from the key file path
if rsa_key_raw == '':
with open(rsa_key_path,'r') as rsa_public_file:
rsa_key_data = RSA.importKey(rsa_public_file.read())
else:
rsa_key_data = RSA.importKey(rsa_key_raw)
if _op == 'encrypt':
return PKCS1_OAEP.new(rsa_key_data.publickey())
else:
return PKCS1_OAEP.new(rsa_key_data)
except Exception as other_error:
raise FileCryptException("Key file could not be loaded. "+str(other_error),
"Keyfile error")
def encrypt_operation(key, src, dest, rm_src, log):
log.append('Encrypting file '+src)
# Generate a new random AES key
aeskey = Random.new().read(32)
if dest == '':
dest = src + ".crypt"
encrypt_file(src, aeskey, dest)
# Encrypt the key using RSA
dest_dirname = os.path.dirname(dest)
ciphertext = key.encrypt(aeskey)
with open(dest_dirname + '/aes_key.crypt','wb') as rsafile:
rsafile.write(base64.b64encode(ciphertext))
log.append('Encrypting complete')
# Generate a tar containing the file encrypted and the key
log.append('Generating tar file')
with tarfile.open(dest + '.tar', "w:") as tar:
tar.add(dest_dirname + '/aes_key.crypt', arcname='aes_key.crypt')
tar.add(dest, arcname=os.path.basename(dest) )
os.remove(dest_dirname + '/aes_key.crypt')
os.remove(dest)
log.append('Tar file generated: ' + dest + '.tar')
# Remove src file if rm_src is true
if rm_src:
os.remove(src)
log.append('Removed source file')
def decrypt_operation(key, src, dest, rm_src, log):
log.append('Decrypting file '+src)
# Extract tar file
with tarfile.open(src, 'r:') as tgz:
tgz.extractall(path=os.path.dirname(src))
# Get files
cryptfile = src[:-4]
aes_key_path = os.path.dirname(src) + '/aes_key.crypt'
if dest == '':
if cryptfile.endswith('.crypt'):
dest = cryptfile[:-6]
else:
dest = src + ".crypt"
with open(aes_key_path, 'rb') as encrypted_key:
# Decrypt the key file using RSA
aes_key = key.decrypt(base64.b64decode(encrypted_key.read()))
# Decrypt the file using the decrypted key
decrypt_file(cryptfile, aes_key, dest)
log.append('Decrypted file '+ dest)
os.remove(cryptfile)
os.remove(aes_key_path)
# Remove src file if rm_src is true
if rm_src:
os.remove(src)
log.append('Removed source file')
def encrypt(message, key):
message = pad(message,AES.block_size)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
return iv + cipher.encrypt(message)
def decrypt(ciphertext, key):
iv = ciphertext[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return unpad(plaintext,AES.block_size)
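# The two helpers above encrypt/decrypt a whole message in memory with PKCS#7
# padding via pad/unpad; they are only used by the legacy old_encrypt_file /
# old_decrypt_file helpers at the bottom of this module.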
def decrypt_file(in_filename, key, out_filename=None, chunksize=24*1024):
""" Decrypts a file using AES (CBC mode) with the
given key. Parameters are similar to encrypt_file,
with one difference: out_filename, if not supplied
will be in_filename without its last extension
(i.e. if in_filename is 'aaa.zip.enc' then
out_filename will be 'aaa.zip')
"""
if not out_filename:
out_filename = os.path.splitext(in_filename)[0]
with open(in_filename, 'rb') as infile:
origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
iv = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, iv)
with open(out_filename, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(origsize)
def encrypt_file(in_filename, key, out_filename=None, chunksize=64*1024):
""" Encrypts a file using AES (CBC mode) with the
given key.
key:
The encryption key - a string that must be
either 16, 24 or 32 bytes long. Longer keys
are more secure.
in_filename:
Name of the input file
out_filename:
If None, '<in_filename>.enc' will be used.
chunksize:
Sets the size of the chunk which the function
uses to read and encrypt the file. Larger chunk
sizes can be faster for some files and machines.
chunksize must be divisible by 16.
"""
if not out_filename:
out_filename = in_filename + '.crypt'
#iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))
iv = Random.new().read(AES.block_size)
encryptor = AES.new(key, AES.MODE_CBC, iv)
filesize = os.path.getsize(in_filename)
with open(in_filename, 'rb') as infile:
with open(out_filename, 'wb') as outfile:
outfile.write(struct.pack('<Q', filesize))
outfile.write(iv)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
                    chunk += b' ' * (16 - len(chunk) % 16)
outfile.write(encryptor.encrypt(chunk))
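# Layout of the file produced by encrypt_file and read back by decrypt_file:
#   8 bytes   little-endian original file size (struct '<Q')
#   16 bytes  AES-CBC IV
#   N bytes   ciphertext of the plaintext, space-padded to a multiple of 16 bytes
# decrypt_file truncates the decrypted output back to the original size.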
def old_encrypt_file(file_name, key, dest):
with open(file_name, 'rb') as fo:
plaintext = fo.read()
enc = encrypt(plaintext, key)
with open(dest, 'wb') as fo:
fo.write(enc)
def old_decrypt_file(file_name, key, dest):
if dest == '':
dest = file_name + ".decrypt"
with open(file_name, 'rb') as fo:
ciphertext = fo.read()
dec = decrypt(ciphertext, key)
with open(dest, 'wb') as fo:
fo.write(dec)
def main():
'''Main function'''
run_module()
if __name__ == '__main__':
main()
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import unittest
from mock import Mock, patch
from django.contrib.auth.models import User
from datawinners.accountmanagement.models import NGOUserProfile
from datawinners.alldata import helper
from datawinners.alldata.helper import get_all_project_for_user
from datawinners.project.couch_view_helper import get_all_projects
class TestHelper(unittest.TestCase):
def setUp(self):
self.database_manager = helper.get_database_manager
self.all_projects = get_all_projects
helper.get_database_manager = stub_get_database_manager
def _get_normal_user(self):
user = Mock(User)
normal_profile = Mock(NGOUserProfile)
normal_profile.reporter = False
user.get_profile.return_value = normal_profile
return user
def _get_reporter_user(self):
user = Mock(User)
reporter_profile = Mock(NGOUserProfile)
reporter_profile.reporter = True
reporter_profile.reporter_id = 'something'
user.get_profile.return_value = reporter_profile
return user
# def test_should_return_all_projects(self):
# user = self._get_normal_user()
# with patch("datawinners.alldata.helper.get_all_projects") as get_all_projects_mock:
# get_all_projects_mock.return_value = {"project_name": "hello world"}
# projects = get_all_project_for_user(user)
# assert projects["project_name"] == "hello world"
# def test_should_return_all_projects_for_user(self):
# user = self._get_reporter_user()
# get_all_projects = stub_get_all_projects_for_reporter
# projects = get_all_project_for_user(user)
# assert projects["project_name"] == "hello world"
def test_should_return_disabled_and_display_none_for_reporter(self):
user = self._get_reporter_user()
disabled, hide = helper.get_visibility_settings_for(user)
assert disabled == 'disable_link_for_reporter'
assert hide == 'none'
def test_should_return_enabled_and_display_for_other(self):
user = self._get_normal_user()
disabled, hide = helper.get_visibility_settings_for(user)
assert hide == ""
assert disabled == ""
def test_should_return_DataSubmission_for_reporter(self):
user = self._get_reporter_user()
assert helper.get_page_heading(user) == 'Data Submission'
def test_should_return_AllData_for_other(self):
user = self._get_normal_user()
assert helper.get_page_heading(user) == 'Questionnaires'
def tearDown(self):
get_all_projects = self.all_projects
helper.get_database_manager = self.database_manager
def stub_get_database_manager(*args):
return Mock()
def stub_get_all_projects_for_reporter(*args):
assert args[0] is not None
assert args[1] is not None
return {"project_name": "hello world"}
def stub_get_all_projects(*args):
assert args[0] is not None
return {"project_name": "hello world"}
|
from clld.web.maps import ParameterMap, Map
class NumeralParameterMap(ParameterMap):
def get_options(self):
return {
'base_layer': 'Esri.WorldPhysical',
'icon_size': 15,
'max_zoom': 9,
'hash': True,
}
class NumeralLanguagesMap(Map):
def get_options(self):
return {
'base_layer': 'Esri.WorldPhysical',
'icon_size': 15,
'max_zoom': 9,
'hash': True,
'info_query': {'map_pop_up': 1},
}
def includeme(config):
config.register_map('parameter', NumeralParameterMap)
config.register_map('languages', NumeralLanguagesMap)
|
#!/usr/bin/env python3
#
# Copyright 2022 Graviti. Licensed under MIT License.
#
"""Parameter type releated classes."""
from typing import TYPE_CHECKING
from typing import Any as TypingAny
from typing import Dict, List, Sequence, Tuple, Type, Union
from graviti.portex.base import PortexType as ClassPortexType
from graviti.portex.field import Fields as ClassFields
from graviti.portex.package import Imports
if TYPE_CHECKING:
from graviti.portex.factory import Dynamic
class ParameterType:
"""The base class of parameter type."""
@staticmethod
def check(_: TypingAny) -> TypingAny:
"""Check the parameter type.
Arguments:
_: The argument which needs to be checked.
Raises:
NotImplementedError: The check method in base class should never be called.
"""
raise NotImplementedError
@staticmethod
def load(content: TypingAny, _: Imports) -> TypingAny:
"""Create an instance of the parameter type from the python content.
Arguments:
content: A python presentation of the parameter type.
_: The imports of the parameter type.
Returns:
An instance of the parameter type.
"""
return content
@staticmethod
def dump(arg: TypingAny) -> TypingAny:
"""Dump the parameter type instance into the python presentation.
Arguments:
arg: The parameter type instance.
Returns:
The python presentation of the input instance.
"""
return arg
PType = Union[Type[ParameterType], "Dynamic"]
class Any(ParameterType):
"""Unconstrained parameter type."""
@staticmethod
def check(arg: TypingAny) -> TypingAny:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
"""
return arg
class Boolean(ParameterType):
"""Parameter type for JSON Boolean."""
@staticmethod
def check(arg: TypingAny) -> TypingAny:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
Raises:
TypeError: When the input argument is not a JSON boolean (bool in python).
"""
if not isinstance(arg, bool):
raise TypeError("Argument should be a bool")
return arg
class Array(ParameterType):
"""Parameter type for JSON Array."""
@staticmethod
def check(arg: TypingAny) -> Sequence[TypingAny]:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
Raises:
TypeError: When the input argument is not a JSON array (Sequence in python).
"""
if not isinstance(arg, Sequence):
raise TypeError("Argument should be a Sequence")
return arg
class Field(ParameterType):
"""Parameter type for Portex record field."""
@staticmethod
def check(arg: TypingAny) -> Tuple[str, ClassPortexType]:
"""Check and transfer the parameter type.
Arguments:
arg: The argument which needs to be checked.
Raises:
TypeError: When the input argument is not a tuple of a str and a PortexType.
Returns:
A tuple of str and PortexType created by the input argument.
"""
name, portex_type = arg
if isinstance(name, str) and isinstance(portex_type, ClassPortexType):
return name, portex_type
raise TypeError("Argument should be a tuple of a str and a PortexType")
@staticmethod
def load(content: Dict[str, TypingAny], imports: Imports) -> Tuple[str, ClassPortexType]:
"""Create Portex field instance from python dict.
Arguments:
content: A python dict representing a Portex field.
imports: The imports of the Portex field.
Returns:
A tuple of name and PortexType created from the input python dict.
"""
return content["name"], ClassPortexType.from_pyobj(content, imports)
@staticmethod
def dump(arg: Tuple[str, ClassPortexType]) -> Dict[str, TypingAny]:
"""Dump the input Portex field instance to a python dict.
Arguments:
arg: A tuple of name and PortexType.
Returns:
A Python dict representation of the Portex field.
"""
name, portex_type = arg
return {"name": name, **portex_type.to_pyobj(False)}
class Fields(ParameterType):
"""Parameter type for Portex record fields."""
@staticmethod
def check(arg: TypingAny) -> ClassFields:
"""Check and transfer the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
A :class:`Fields` instance created by the input argument.
"""
return ClassFields(arg)
@staticmethod
def load(content: List[TypingAny], imports: Imports) -> ClassFields:
"""Create Portex field list instance from python list.
Arguments:
content: A python list representing a Portex field list.
imports: The imports of the Portex field.
Returns:
A Portex field list instance created from the input python list.
"""
return ClassFields.from_pyobj(content, imports)
@staticmethod
def dump(arg: ClassFields) -> List[TypingAny]:
"""Dump the input Portex field list instance to a python list.
Arguments:
arg: A Portex field list instance.
Returns:
A Python list representation of the Portex field list.
"""
return arg.to_pyobj()
class Number(ParameterType):
"""Parameter type for JSON number."""
@staticmethod
def check(arg: TypingAny) -> float:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
Raises:
TypeError: When the input argument is not a JSON number (float and int in python).
"""
if not isinstance(arg, (float, int)):
raise TypeError("Argument should be a float or int")
return arg
class Integer(ParameterType):
"""Parameter type for JSON integer."""
@staticmethod
    def check(arg: TypingAny) -> int:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
Raises:
TypeError: When the input argument is not a JSON integer (int in python).
"""
if not isinstance(arg, int):
raise TypeError("Argument should be a int")
return arg
class PortexType(ParameterType):
"""Parameter type for Portex type."""
@staticmethod
def check(arg: TypingAny) -> ClassPortexType:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
Raises:
TypeError: When the input argument is not a Portex type.
"""
if not isinstance(arg, ClassPortexType):
raise TypeError("Argument should be a Portex type")
return arg
@staticmethod
def load(content: Dict[str, TypingAny], imports: Imports) -> ClassPortexType:
"""Create Portex type instance from python dict.
Arguments:
content: A python dict representing a Portex type.
imports: The imports of the Portex type.
Returns:
A Portex type instance created from the input python dict.
"""
return ClassPortexType.from_pyobj(content, imports)
@staticmethod
def dump(arg: ClassPortexType) -> Dict[str, TypingAny]:
"""Dump the instance to a python dict.
Arguments:
arg: A Portex type instance.
Returns:
A python dict representation of the Portex type.
"""
return arg.to_pyobj(False)
class String(ParameterType):
"""Parameter type for JSON string."""
@staticmethod
def check(arg: TypingAny) -> str:
"""Check the parameter type.
Arguments:
arg: The argument which needs to be checked.
Returns:
The input argument unchanged.
Raises:
TypeError: When the input argument is not a JSON string (str in python).
"""
if not isinstance(arg, str):
raise TypeError("Argument should be a string")
return arg
class TypeName(String):
"""Parameter type for Portex type name."""
|
# Random utility functions.
import itertools
# Flatten a list of lists, e.g. [[1, 2], [3, 4]] => [1, 2, 3, 4].
def flatten(l):
    return list(itertools.chain.from_iterable(l))
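# Minimal sanity check; only runs when this module is executed directly.
if __name__ == "__main__":
    assert flatten([[1, 2], [3, 4]]) == [1, 2, 3, 4]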
|
import time
import numpy as np
import pandas as pd
from research.gpq.icinco_demo import evaluate_performance
from ssmtoybox.mtran import UnscentedTransform
from ssmtoybox.ssinf import ExtendedKalman, ExtendedKalmanGPQD
from ssmtoybox.ssmod import UNGMTransition, UNGMMeasurement
from ssmtoybox.utils import GaussRV
steps, mc = 50, 10 # time steps, mc simulations
# setup univariate non-stationary growth model
x0 = GaussRV(1, cov=np.atleast_2d(5.0))
q = GaussRV(1, cov=np.atleast_2d(10.0))
dyn = UNGMTransition(x0, q) # dynamics
r = GaussRV(1)
obs = UNGMMeasurement(r, 1) # observation model
x = dyn.simulate_discrete(steps, mc)
z = obs.simulate_measurements(x)
# use only the central sigma-point
usp_0 = np.zeros((dyn.dim_in, 1))
usp_ut = UnscentedTransform.unit_sigma_points(dyn.dim_in)
# set the RBF kernel hyperparameters
hyp_rbf = np.array([[1.0] + dyn.dim_in*[3.0]])
hyp_rbf_ut = np.array([[8.0] + dyn.dim_in*[0.5]])
# derivative observations only at the central point
der_mask = np.array([0])
# filters/smoothers to test
algorithms = (
# EKF, GPQ+D w/ affine kernel, GPQ+D w/ RBF kernel (el --> infty)
ExtendedKalman(dyn, obs),
# GPQ+D RBF kernel w/ single sigma-point, becomes EKF for el --> infinity
ExtendedKalmanGPQD(dyn, obs, hyp_rbf, hyp_rbf),
)
num_alg = len(algorithms)
# space for estimates
mean_f, cov_f = np.zeros((dyn.dim_in, steps, mc, num_alg)), np.zeros((dyn.dim_in, dyn.dim_in, steps, mc, num_alg))
mean_s, cov_s = np.zeros((dyn.dim_in, steps, mc, num_alg)), np.zeros((dyn.dim_in, dyn.dim_in, steps, mc, num_alg))
# do filtering/smoothing
t0 = time.time() # measure execution time
print('Running filters/smoothers ...')
for a, alg in enumerate(algorithms):
print('{}'.format(alg.__class__.__name__)) # print filter/smoother name
for sim in range(mc):
mean_f[..., sim, a], cov_f[..., sim, a] = alg.forward_pass(z[..., sim])
mean_s[..., sim, a], cov_s[..., sim, a] = alg.backward_pass()
alg.reset()
print('Done in {0:.4f} [sec]'.format(time.time() - t0))
# evaluate performance
scores = evaluate_performance(x, mean_f, cov_f, mean_s, cov_s)
rmseMean_f, nciMean_f, nllMean_f, rmseMean_s, nciMean_s, nllMean_s = scores[:6]
rmseStd_f, nciStd_f, nllStd_f, rmseStd_s, nciStd_s, nllStd_s = scores[6:]
# rmseMean_f, rmseMean_s = rmseMean_f.squeeze(), rmseMean_s.squeeze()
# nciMean_f, nciMean_s = nciMean_f.squeeze(), nciMean_s.squeeze()
# nllMean_f, nllMean_s = nllMean_f.squeeze(), nllMean_s.squeeze()
# put data into Pandas DataFrame for fancy printing and latex export
row_labels = ['EKF', 'EKF-GPQD'] # ['EKF', 'GPQD-RBF', 'GPQD-AFFINE', 'UKF', 'GPQD-UT-RBF']
col_labels = ['RMSE', '2STD', 'NCI', '2STD', 'NLL', '2STD']
pd.set_option('precision', 4, 'max_columns', 6)
table_f = pd.DataFrame(np.hstack((rmseMean_f, rmseStd_f, nciMean_f, nciStd_f, nllMean_f, nllStd_f)),
index=row_labels, columns=col_labels)
table_s = pd.DataFrame(np.hstack((rmseMean_s, rmseStd_s, nciMean_s, nciStd_s, nllMean_s, nllStd_s)),
index=row_labels, columns=col_labels)
# print tables
print('Filter metrics')
print(table_f)
print('Smoother metrics')
print(table_s)
|
from functools import partial
import numpy as np
from modAL.batch import uncertainty_batch_sampling
from modAL.models import ActiveLearner
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
# Set our RNG for reproducibility.
RANDOM_STATE_SEED = 123
np.random.seed(RANDOM_STATE_SEED)
iris = load_iris()
X_raw = iris['data']
y_raw = iris['target']
# Define our PCA transformer and fit it onto our raw dataset.
pca = PCA(n_components=2, random_state=RANDOM_STATE_SEED)
transformed_iris = pca.fit_transform(X=X_raw)
# Isolate the data we'll need for plotting.
x_component, y_component = transformed_iris[:, 0], transformed_iris[:, 1]
# Isolate our examples for our labeled dataset.
n_labeled_examples = X_raw.shape[0]
training_indices = np.random.randint(low=0, high=n_labeled_examples, size=3)
X_train = X_raw[training_indices]
y_train = y_raw[training_indices]
# Isolate the non-training examples we'll be querying.
X_pool = np.delete(X_raw, training_indices, axis=0)
y_pool = np.delete(y_raw, training_indices, axis=0)
# Pre-set our batch sampling to retrieve 3 samples at a time.
BATCH_SIZE = 3
preset_batch = partial(uncertainty_batch_sampling, n_instances=BATCH_SIZE)
# Testing the cold-start
learner = ActiveLearner(
estimator=KNeighborsClassifier(n_neighbors=3),
query_strategy=preset_batch
)
cold_start_idx, cold_start_inst = learner.query(X_raw)
learner.teach(X_raw[cold_start_idx], y_raw[cold_start_idx])
# Specify our active learning model.
learner = ActiveLearner(
estimator=KNeighborsClassifier(n_neighbors=3),
X_training=X_train,
y_training=y_train,
query_strategy=preset_batch
)
predictions = learner.predict(X_raw)
# Record our learner's score on the raw data.
unqueried_score = learner.score(X_raw, y_raw)
# Pool-based sampling
N_RAW_SAMPLES = 20
N_QUERIES = N_RAW_SAMPLES // BATCH_SIZE
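# With N_RAW_SAMPLES = 20 and BATCH_SIZE = 3 this gives 20 // 3 == 6 query rounds,
# i.e. 18 pool points end up labelled in total.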
for index in range(N_QUERIES):
query_index, query_instance = learner.query(X_pool)
# Teach our ActiveLearner model the record it has requested.
X, y = X_pool[query_index], y_pool[query_index]
learner.teach(X=X, y=y)
# Remove the queried instance from the unlabeled pool.
X_pool = np.delete(X_pool, query_index, axis=0)
y_pool = np.delete(y_pool, query_index)
# Calculate and report our model's accuracy.
model_accuracy = learner.score(X_raw, y_raw)
predictions = learner.predict(X_raw)
|
import numpy as np
from det3d.core.bbox import box_np_ops
from det3d.core.sampler import preprocess as prep
from det3d.builder import build_dbsampler
from det3d.core.input.voxel_generator import VoxelGenerator
from det3d.core.utils.center_utils import (
draw_umich_gaussian, gaussian_radius)
from ..registry import PIPELINES
def _dict_select(dict_, inds):
for k, v in dict_.items():
if isinstance(v, dict):
_dict_select(v, inds)
else:
dict_[k] = v[inds]
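# Returns the indices of entries whose class name is NOT in used_classes,
# i.e. the boxes to keep once the listed classes have been dropped.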
def drop_arrays_by_name(gt_names, used_classes):
inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds
@PIPELINES.register_module
class Preprocess(object):
def __init__(self, cfg=None, **kwargs):
self.shuffle_points = cfg.shuffle_points
self.min_points_in_gt = cfg.get("min_points_in_gt", -1)
self.mode = cfg.mode
if self.mode == "train":
self.global_rotation_noise = cfg.global_rot_noise
self.global_scaling_noise = cfg.global_scale_noise
self.global_translate_std = cfg.get('global_translate_std', 0)
self.class_names = cfg.class_names
            if cfg.db_sampler is not None:
# 1. defined in det3d/builder.py/build_dbsampler
# 2. defined in det3d/core/sampler/preprocess.py/ [DBFilterByMinNumPoint, DBFilterByDifficulty, DataBasePreprocessor]
# 3. defined in det3d/core/sampler/sample_ops.py/DataBaseSamplerV2
self.db_sampler = build_dbsampler(cfg.db_sampler)
else:
self.db_sampler = None
self.npoints = cfg.get("npoints", -1)
self.no_augmentation = cfg.get('no_augmentation', False)
def __call__(self, res, info):
# 1. save mode in res
res["mode"] = self.mode
# 2. get points from above res
if res["type"] in ["WaymoDataset"]:
if "combined" in res["lidar"]:
points = res["lidar"]["combined"]
else:
points = res["lidar"]["points"]
elif res["type"] in ["NuScenesDataset"]:
points = res["lidar"]["combined"]
else:
raise NotImplementedError
# 3. get anno_dict && gt_dict
if self.mode == "train":
anno_dict = res["lidar"]["annotations"]
gt_dict = {
"gt_boxes": anno_dict["boxes"],
"gt_names": np.array(anno_dict["names"]).reshape(-1),
}
if self.mode == "train" and not self.no_augmentation:
            # 4.1 drop boxes whose class we do not care about
selected = drop_arrays_by_name(
gt_dict["gt_names"], ["DontCare", "ignore", "UNKNOWN"]
)
_dict_select(gt_dict, selected)
            # 4.2 drop boxes that contain too few points
if self.min_points_in_gt > 0:
                # TODO: points_count_rbbox returns shape (n,), the number of points inside each gt_box
                # TODO CUT HERE
point_counts = box_np_ops.points_count_rbbox(
points, gt_dict["gt_boxes"]
)
                mask = point_counts >= self.min_points_in_gt
_dict_select(gt_dict, mask)
            # 4.3 mask records whether each gt label is one of the valid class_names
gt_boxes_mask = np.array(
[n in self.class_names for n in gt_dict["gt_names"]], dtype=np.bool_
)
if self.db_sampler:
sampled_dict = self.db_sampler.sample_all(
res["metadata"]["image_prefix"],
gt_dict["gt_boxes"],
gt_dict["gt_names"],
res["metadata"]["num_point_features"],
False,
gt_group_ids=None,
calib=None,
road_planes=None)
if sampled_dict is not None:
sampled_gt_names = sampled_dict["gt_names"]
sampled_gt_boxes = sampled_dict["gt_boxes"]
sampled_points = sampled_dict["points"]
sampled_gt_masks = sampled_dict["gt_masks"]
gt_dict["gt_names"] = np.concatenate(
[gt_dict["gt_names"], sampled_gt_names], axis=0
)
gt_dict["gt_boxes"] = np.concatenate(
[gt_dict["gt_boxes"], sampled_gt_boxes]
)
gt_boxes_mask = np.concatenate(
[gt_boxes_mask, sampled_gt_masks], axis=0
)
points = np.concatenate([sampled_points, points], axis=0)
            # 4.4 filter gt_dict using the mask from 4.3 together with sampled_gt_masks
_dict_select(gt_dict, gt_boxes_mask)
            # 4.5 encode gt_names from str to int (labels start at 1) and store them in gt_dict
gt_classes = np.array(
[self.class_names.index(n) + 1 for n in gt_dict["gt_names"]],
dtype=np.int32,)
gt_dict["gt_classes"] = gt_classes
            # 4.6 data augmentation
gt_dict["gt_boxes"], points = prep.random_flip_both(gt_dict["gt_boxes"], points)
gt_dict["gt_boxes"], points = prep.global_rotation(
gt_dict["gt_boxes"], points, rotation=self.global_rotation_noise)
gt_dict["gt_boxes"], points = prep.global_scaling_v2(
gt_dict["gt_boxes"], points, *self.global_scaling_noise)
gt_dict["gt_boxes"], points = prep.global_translate_(
gt_dict["gt_boxes"], points, noise_translate_std=self.global_translate_std)
elif self.no_augmentation:
gt_boxes_mask = np.array(
[n in self.class_names for n in gt_dict["gt_names"]], dtype=np.bool_
)
_dict_select(gt_dict, gt_boxes_mask)
gt_classes = np.array(
[self.class_names.index(n) + 1 for n in gt_dict["gt_names"]],
dtype=np.int32,
)
gt_dict["gt_classes"] = gt_classes
if self.shuffle_points:
np.random.shuffle(points)
res["lidar"]["points"] = points
if self.mode == "train":
res["lidar"]["annotations"] = gt_dict
return res, info
@PIPELINES.register_module
class Voxelization(object):
def __init__(self, **kwargs):
cfg = kwargs.get("cfg", None)
self.range = cfg.range
self.voxel_size = cfg.voxel_size
self.max_points_in_voxel = cfg.max_points_in_voxel
self.max_voxel_num = [cfg.max_voxel_num, cfg.max_voxel_num] if isinstance(cfg.max_voxel_num, int) else cfg.max_voxel_num
self.double_flip = cfg.get('double_flip', False)
self.voxel_generator = VoxelGenerator(
voxel_size=self.voxel_size,
point_cloud_range=self.range,
max_num_points=self.max_points_in_voxel,
max_voxels=self.max_voxel_num[0],
)
def __call__(self, res, info):
voxel_size = self.voxel_generator.voxel_size
pc_range = self.voxel_generator.point_cloud_range
grid_size = self.voxel_generator.grid_size
if res["mode"] == "train":
gt_dict = res["lidar"]["annotations"]
bv_range = pc_range[[0, 1, 3, 4]]
mask = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"], bv_range)
_dict_select(gt_dict, mask)
res["lidar"]["annotations"] = gt_dict
max_voxels = self.max_voxel_num[0]
else:
max_voxels = self.max_voxel_num[1]
voxels, coordinates, num_points = self.voxel_generator.generate(
res["lidar"]["points"], max_voxels=max_voxels
)
num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
res["lidar"]["voxels"] = dict(
voxels=voxels,
coordinates=coordinates,
num_points=num_points,
num_voxels=num_voxels,
shape=grid_size,
range=pc_range,
size=voxel_size
)
double_flip = self.double_flip and (res["mode"] != 'train')
if double_flip:
flip_voxels, flip_coordinates, flip_num_points = self.voxel_generator.generate(
res["lidar"]["yflip_points"]
)
flip_num_voxels = np.array([flip_voxels.shape[0]], dtype=np.int64)
res["lidar"]["yflip_voxels"] = dict(
voxels=flip_voxels,
coordinates=flip_coordinates,
num_points=flip_num_points,
num_voxels=flip_num_voxels,
shape=grid_size,
range=pc_range,
size=voxel_size
)
flip_voxels, flip_coordinates, flip_num_points = self.voxel_generator.generate(
res["lidar"]["xflip_points"]
)
flip_num_voxels = np.array([flip_voxels.shape[0]], dtype=np.int64)
res["lidar"]["xflip_voxels"] = dict(
voxels=flip_voxels,
coordinates=flip_coordinates,
num_points=flip_num_points,
num_voxels=flip_num_voxels,
shape=grid_size,
range=pc_range,
size=voxel_size
)
flip_voxels, flip_coordinates, flip_num_points = self.voxel_generator.generate(
res["lidar"]["double_flip_points"]
)
flip_num_voxels = np.array([flip_voxels.shape[0]], dtype=np.int64)
res["lidar"]["double_flip_voxels"] = dict(
voxels=flip_voxels,
coordinates=flip_coordinates,
num_points=flip_num_points,
num_voxels=flip_num_voxels,
shape=grid_size,
range=pc_range,
size=voxel_size
)
return res, info
def flatten(box):
return np.concatenate(box, axis=0)
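# merge_multi_group_label below shifts the per-task class ids into one global id space.
# For example (illustrative numbers only), with num_classes_by_task = [1, 2] the task-0
# ids stay unchanged while the task-1 ids are offset by 1 before everything is flattened.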
def merge_multi_group_label(gt_classes, num_classes_by_task):
num_task = len(gt_classes)
flag = 0
for i in range(num_task):
gt_classes[i] += flag
flag += num_classes_by_task[i]
return flatten(gt_classes)
@PIPELINES.register_module
class AssignLabel(object):
def __init__(self, **kwargs):
"""Return CenterNet training labels like heatmap, height, offset"""
assigner_cfg = kwargs["cfg"]
self.out_size_factor = assigner_cfg.out_size_factor
self.tasks = assigner_cfg.target_assigner.tasks
self.gaussian_overlap = assigner_cfg.gaussian_overlap
self._max_objs = assigner_cfg.max_objs
self._min_radius = assigner_cfg.min_radius
def __call__(self, res, info):
max_objs = self._max_objs
class_names_by_task = [t.class_names for t in self.tasks]
num_classes_by_task = [t.num_class for t in self.tasks]
# Calculate output featuremap size
grid_size = res["lidar"]["voxels"]["shape"]
pc_range = res["lidar"]["voxels"]["range"]
voxel_size = res["lidar"]["voxels"]["size"]
feature_map_size = grid_size[:2] // self.out_size_factor
example = {}
if res["mode"] == "train":
gt_dict = res["lidar"]["annotations"]
# reorganize the gt_dict by tasks
task_masks = []
flag = 0
for class_name in class_names_by_task:
task_masks.append(
[
np.where(
gt_dict["gt_classes"] == class_name.index(i) + 1 + flag
)
for i in class_name
]
)
flag += len(class_name)
task_boxes = []
task_classes = []
task_names = []
flag2 = 0
for idx, mask in enumerate(task_masks):
task_box = []
task_class = []
task_name = []
for m in mask:
task_box.append(gt_dict["gt_boxes"][m])
task_class.append(gt_dict["gt_classes"][m] - flag2)
task_name.append(gt_dict["gt_names"][m])
task_boxes.append(np.concatenate(task_box, axis=0))
task_classes.append(np.concatenate(task_class))
task_names.append(np.concatenate(task_name))
flag2 += len(mask)
for task_box in task_boxes:
# limit rad to [-pi, pi]
task_box[:, -1] = box_np_ops.limit_period(
task_box[:, -1], offset=0.5, period=np.pi * 2
)
# print(gt_dict.keys())
gt_dict["gt_classes"] = task_classes
gt_dict["gt_names"] = task_names
gt_dict["gt_boxes"] = task_boxes
res["lidar"]["annotations"] = gt_dict
draw_gaussian = draw_umich_gaussian
hms, anno_boxs, inds, masks, cats = [], [], [], [], []
for idx, task in enumerate(self.tasks):
hm = np.zeros((len(class_names_by_task[idx]), feature_map_size[1], feature_map_size[0]),
dtype=np.float32)
if res['type'] == 'NuScenesDataset':
# [reg, hei, dim, vx, vy, rots, rotc]
anno_box = np.zeros((max_objs, 10), dtype=np.float32)
elif res['type'] == 'WaymoDataset':
anno_box = np.zeros((max_objs, 10), dtype=np.float32)
else:
raise NotImplementedError("Only Support nuScene for Now!")
ind = np.zeros((max_objs), dtype=np.int64)
mask = np.zeros((max_objs), dtype=np.uint8)
cat = np.zeros((max_objs), dtype=np.int64)
num_objs = min(gt_dict['gt_boxes'][idx].shape[0], max_objs)
for k in range(num_objs):
cls_id = gt_dict['gt_classes'][idx][k] - 1
w, l, h = gt_dict['gt_boxes'][idx][k][3], gt_dict['gt_boxes'][idx][k][4], \
gt_dict['gt_boxes'][idx][k][5]
w, l = w / voxel_size[0] / self.out_size_factor, l / voxel_size[1] / self.out_size_factor
if w > 0 and l > 0:
radius = gaussian_radius((l, w), min_overlap=self.gaussian_overlap)
radius = max(self._min_radius, int(radius))
# be really careful for the coordinate system of your box annotation.
x, y, z = gt_dict['gt_boxes'][idx][k][0], gt_dict['gt_boxes'][idx][k][1], \
gt_dict['gt_boxes'][idx][k][2]
coor_x, coor_y = (x - pc_range[0]) / voxel_size[0] / self.out_size_factor, \
(y - pc_range[1]) / voxel_size[1] / self.out_size_factor
ct = np.array(
[coor_x, coor_y], dtype=np.float32)
ct_int = ct.astype(np.int32)
# throw out not in range objects to avoid out of array area when creating the heatmap
if not (0 <= ct_int[0] < feature_map_size[0] and 0 <= ct_int[1] < feature_map_size[1]):
continue
draw_gaussian(hm[cls_id], ct, radius)
new_idx = k
x, y = ct_int[0], ct_int[1]
cat[new_idx] = cls_id
ind[new_idx] = y * feature_map_size[0] + x
mask[new_idx] = 1
if res['type'] == 'NuScenesDataset':
vx, vy = gt_dict['gt_boxes'][idx][k][6:8]
rot = gt_dict['gt_boxes'][idx][k][8]
anno_box[new_idx] = np.concatenate(
(ct - (x, y), z, np.log(gt_dict['gt_boxes'][idx][k][3:6]),
np.array(vx), np.array(vy), np.sin(rot), np.cos(rot)), axis=None)
elif res['type'] == 'WaymoDataset':
vx, vy = gt_dict['gt_boxes'][idx][k][6:8]
rot = gt_dict['gt_boxes'][idx][k][-1]
anno_box[new_idx] = np.concatenate(
(ct - (x, y), z, np.log(gt_dict['gt_boxes'][idx][k][3:6]),
np.array(vx), np.array(vy), np.sin(rot), np.cos(rot)), axis=None)
else:
raise NotImplementedError("Only Support Waymo and nuScene for Now")
hms.append(hm)
anno_boxs.append(anno_box)
masks.append(mask)
inds.append(ind)
cats.append(cat)
# used for two stage code
boxes = flatten(gt_dict['gt_boxes'])
classes = merge_multi_group_label(gt_dict['gt_classes'], num_classes_by_task)
if res["type"] == "NuScenesDataset":
gt_boxes_and_cls = np.zeros((max_objs, 10), dtype=np.float32)
elif res['type'] == "WaymoDataset":
gt_boxes_and_cls = np.zeros((max_objs, 10), dtype=np.float32)
else:
raise NotImplementedError()
boxes_and_cls = np.concatenate((boxes,
classes.reshape(-1, 1).astype(np.float32)), axis=1)
num_obj = len(boxes_and_cls)
assert num_obj <= max_objs
# x, y, z, w, l, h, rotation_y, velocity_x, velocity_y, class_name
boxes_and_cls = boxes_and_cls[:, [0, 1, 2, 3, 4, 5, 8, 6, 7, 9]]
gt_boxes_and_cls[:num_obj] = boxes_and_cls
example.update({'gt_boxes_and_cls': gt_boxes_and_cls})
example.update({'hm': hms, 'anno_box': anno_boxs, 'ind': inds, 'mask': masks, 'cat': cats})
else:
pass
res["lidar"]["targets"] = example
return res, info
|
class Config(object):
from datetime import timedelta
SECRET_KEY = 'OVERRIDE THIS WITH A SECURE VALUE in instance/config.py!'
CELERY_BROKER_URL = 'redis://'
CELERY_RESULT_BACKEND = 'redis://'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERYBEAT_SCHEDULE = {
'update-agencies-every-week': {
'task': 'celerytasks.update_agencies',
'schedule': timedelta(days=7),
},
'update-routes-every-24h': {
'task': 'celerytasks.update_routes',
'schedule': timedelta(hours=24),
},
'update-predictions-every-9s': {
'task': 'celerytasks.update_predictions',
'schedule': timedelta(seconds=9),
},
'update-vehicle-locations-every-3s': {
'task': 'celerytasks.update_vehicle_locations',
'schedule': timedelta(seconds=3),
},
'delete-stale-predictions-every-5m': {
'task': 'celerytasks.delete_stale_predictions',
'schedule': timedelta(minutes=5),
},
'delete-stale-vehicle-locations-every-5m': {
'task': 'celerytasks.delete_stale_vehicle_locations',
'schedule': timedelta(minutes=5),
},
}
SQLALCHEMY_TRACK_MODIFICATIONS = False
    PREDICTIONS_MAX_AGE = 5 * 60
    LOCATIONS_MAX_AGE = 5 * 60
AGENCIES = ['rutgers']
# Stops with the same tag within this distance of each other will be averaged to one lat/lon point.
# 0.001 = 110 Meters (football field)
SAME_STOP_LAT = 0.005
SAME_STOP_LON = 0.005
# Map display parameters
MAP_ERROR_TILE_URL = 'http://tiles.antsar-static.com/generic/tile-blank-black.png'
MAP_TILE_URL = 'http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png'
MAP_TILE_SUBDOMAINS = ['a', 'b', 'c']
MAP_TILESET = 'rutgers-black'
MAP_LAT_PADDING = 0.03
MAP_LON_PADDING = 0.03
class ProdConfig(Config):
SQLALCHEMY_URI = 'postgresql://localhost/pybusmap_prod'
CELERY_BROKER_URL = 'redis://localhost/0'
CELERY_RESULT_BACKEND = 'redis://localhost/0'
class DevConfig(Config):
DEBUG = True
SQLALCHEMY_URI = 'postgresql://localhost/pybusmap_dev'
CELERY_BROKER_URL = 'redis://localhost/1'
CELERY_RESULT_BACKEND = 'redis://localhost/1'
|
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from builtins import *
import numpy
from cubeaccess.core import Coordinate, Variable, StorageUnitBase, StorageUnitDimensionProxy, StorageUnitStack
from cubeaccess.storage import NetCDF4StorageUnit, GeoTifStorageUnit
from cubeaccess.indexing import Range
class TestStorageUnit(StorageUnitBase):
def __init__(self, coords, vars):
self.coordinates = coords
self.variables = vars
def _get_coord(self, name):
coord = self.coordinates[name]
data = numpy.linspace(coord.begin, coord.end, coord.length, dtype=coord.dtype)
return data
def _fill_data(self, name, index, dest):
dest.fill(1)
ds1 = TestStorageUnit({
't': Coordinate(numpy.int, 100, 400, 4),
'y': Coordinate(numpy.float32, 0, 9.5, 20),
'x': Coordinate(numpy.float32, 9, 0, 10)
}, {
'B10': Variable(numpy.float32, numpy.nan, ('t', 'y', 'x'))
})
ds2 = TestStorageUnit({
't': Coordinate(numpy.int, 500, 600, 3),
'y': Coordinate(numpy.float32, 5, 14.5, 20),
'x': Coordinate(numpy.float32, 4, -5, 10)
}, {
'B10': Variable(numpy.float32, numpy.nan, ('t', 'y', 'x'))
})
netcdffiles = [
"/short/v10/dra547/injest_examples/multiple_band_variables/LS7_ETM_NBAR_P54_GANBAR01-002_089_078_2015_152_-26.nc",
"/short/v10/dra547/injest_examples/multiple_band_variables/LS7_ETM_NBAR_P54_GANBAR01-002_089_078_2015_152_-27.nc",
"/short/v10/dra547/injest_examples/multiple_band_variables/LS7_ETM_NBAR_P54_GANBAR01-002_089_078_2015_153_-26.nc",
"/short/v10/dra547/injest_examples/multiple_band_variables/LS7_ETM_NBAR_P54_GANBAR01-002_089_078_2015_153_-27.nc",
"/short/v10/dra547/injest_examples/multiple_band_variables/LS7_ETM_NBAR_P54_GANBAR01-002_089_078_2015_154_-26.nc",
"/short/v10/dra547/injest_examples/multiple_band_variables/LS7_ETM_NBAR_P54_GANBAR01-002_089_078_2015_154_-27.nc"
]
geotiffiles = [
# "/mnt/data/tiles/EPSG4326_1deg_0.00025pixel/LS7_ETM/142_-033/2010/LS7_ETM_NBAR_142_-033_2010-01-16T00-12-07.682499.tif",
# "/mnt/data/tiles/EPSG4326_1deg_0.00025pixel/LS7_ETM/142_-033/2010/LS7_ETM_FC_142_-033_2010-01-16T00-12-07.682499.tif",
# "/mnt/data/tiles/EPSG4326_1deg_0.00025pixel/LS7_ETM/142_-033/2010/LS7_ETM_NBAR_142_-033_2010-01-16T00-11-43.729979.tif",
# "/mnt/data/tiles/EPSG4326_1deg_0.00025pixel/LS7_ETM/142_-033/2010/LS7_ETM_FC_142_-033_2010-01-16T00-11-43.729979.tif",
# "/mnt/data/tiles/EPSG4326_1deg_0.00025pixel/LS7_ETM/142_-033/2010/LS7_ETM_NBAR_142_-033_2010-01-07T00-17-46.208174.tif",
"/g/data/rs0/tiles/EPSG4326_1deg_0.00025pixel/LS5_TM/142_-033/2004/LS5_TM_NBAR_142_-033_2004-01-07T23-59-21.879044.tif",
"/g/data/rs0/tiles/EPSG4326_1deg_0.00025pixel/LS5_TM/142_-033/2004/LS5_TM_NBAR_142_-033_2004-11-07T00-05-33.311000.tif",
"/g/data/rs0/tiles/EPSG4326_1deg_0.00025pixel/LS5_TM/142_-033/2004/LS5_TM_NBAR_142_-033_2004-12-25T00-06-26.534031.tif",
]
def _time_from_filename(f):
from datetime import datetime
dtstr = f.split('/')[-1].split('_')[-1][:-4]
# 2004-11-07T00-05-33.311000
dt = datetime.strptime(dtstr, "%Y-%m-%dT%H-%M-%S.%f")
return numpy.datetime64(dt, 's')
def test_storage_unit_dimension_proxy():
su = StorageUnitDimensionProxy(ds1, ('greg', 12.0))
data = su._get_coord('greg')
assert(data == numpy.array([12.0]))
data = su.get('B10')
assert(data.values.shape == (1, 4, 20, 10))
assert(data.dims == ('greg', 't', 'y', 'x'))
assert(numpy.all(data.values == 1))
# print(data)
# print (su.coordinates)
# print (su.variables)
data = su.get('B10', greg=Range(13, 14))
assert(data.values.size == 0)
def test_geotif_storage_unit():
files = geotiffiles
su = GeoTifStorageUnit(files[0])
assert(set(su.coordinates.keys()) == ({'x', 'y'}))
data = su.get('2', x=Range(142.5, 142.7), y=Range(-32.5, -32.2))
assert(len(data.coords['x']) == 801)
assert(len(data.coords['y']) == 1201)
assert(numpy.any(data.values != -999))
data = su.get('2', x=slice(500), y=slice(3400, None))
assert(len(data.coords['x']) == 500)
assert(len(data.coords['y']) == 600)
assert(numpy.any(data.values != -999))
# print(su.coordinates)
# print (su.variables)
# print(data)
def test_netcdf_storage_unit():
files = netcdffiles
su = NetCDF4StorageUnit(files[2])
assert(set(su.coordinates.keys()) == ({'longitude', 'latitude', 'time'}))
data = su.get('band2', longitude=Range(153.5, 153.7), latitude=Range(-25.5, -25.2))
assert(len(data.coords['longitude']) == 801)
assert(len(data.coords['latitude']) == 1201)
assert(numpy.any(data.values != -999))
# mds = StorageUnitSet([NetCDF4StorageUnit(filename) for filename in files])
# data = mds.get('band2')
# assert(np.any(data.values != -999))
# print(mds.get('band2'))
# print(mds.coordinates)
# print(mds.variables)
def test_storage_unit_stack():
#TODO: use ds1/ds2
files = geotiffiles
storage_units = [StorageUnitDimensionProxy(GeoTifStorageUnit(f), ('t', _time_from_filename(f))) for f in files]
stack = StorageUnitStack(storage_units, 't')
times = numpy.array([_time_from_filename(f) for f in files])
assert(numpy.all(stack._get_coord('t') == times))
trange = Range(numpy.datetime64('2004-11-07T00:05:33Z', 's'), numpy.datetime64('2004-12-25T00:06:26Z', 's'))
data = stack.get('2', t=trange, x=Range(142.5, 142.7), y=Range(-32.5, -32.2))
assert(len(data.coords['t']) == 2)
assert(len(data.coords['x']) == 801)
assert(len(data.coords['y']) == 1201)
assert(numpy.any(data.values != -999))
data = stack.get('2', t=slice(0, 2), x=slice(500), y=slice(3400, None))
assert(len(data.coords['t']) == 2)
assert(len(data.coords['x']) == 500)
assert(len(data.coords['y']) == 600)
assert(numpy.any(data.values != -999))
# print(stack.coordinates)
# print(stack.variables)
|
import datetime
import dateutil.tz
import numpy as np
import numpy.testing
import pytest
import cara.data.weather as wx
def test_nearest_wx_station():
melbourne_lat, melbourne_lon = -37.81739, 144.96751
station_rec = wx.nearest_wx_station(longitude=melbourne_lon, latitude=melbourne_lat)
station_name = station_rec[1].strip()
# Note: For Melbourne, the nearest station is 'MELBOURNE REGIONAL OFFICE',
# but the nearest location with suitable wx data is 'MELBOURNE ESSENDON'
assert station_name == 'MELBOURNE ESSENDON'
def test_refine():
source_times = [0, 3, 6, 9, 12, 15, 18, 21]
data = [0, 30, 60, 90, 120, 90, 60, 30]
time_bounds, data = wx.refine_hourly_data(source_times, data, 4)
# Notice that the expected data falls in the mid-point of the
# expected time bounds.
np.testing.assert_array_equal(time_bounds, [0., 6., 12., 18., 24.])
np.testing.assert_array_equal(data, [30., 90., 90., 30.])
def test_refine_offset():
source_times = [14, 20, 26, 32]
data = [200., 182, 168, 192]
time_bounds, data = wx.refine_hourly_data(source_times, data, 6)
# Notice that the expected data falls in the mid-point of the
# expected time bounds.
np.testing.assert_array_equal(time_bounds, [0., 4., 8., 12., 16., 20., 24.])
np.testing.assert_array_almost_equal(data, [168., 184., 194.666667, 200., 188., 177.333333])
def test_refine_non_monotonic():
source_times = [14, 20, 2, 8]
data = [200., 182, 168, 192]
time_bounds, data = wx.refine_hourly_data(source_times, data, 6)
# Notice that the expected data falls in the mid-point of the
# expected time bounds.
np.testing.assert_array_equal(time_bounds, [0., 4., 8., 12., 16., 20., 24.])
np.testing.assert_array_almost_equal(data, [168., 184., 194.666667, 200., 188., 177.333333])
def test_timezone_at__out_of_range():
with pytest.raises(ValueError, match='out of bounds'):
wx.timezone_at(latitude=88, longitude=181)
@pytest.mark.parametrize(
["latitude", "longitude", "expected_tz_name"],
[
[6.14275, 46.20833, 'Europe/Zurich'], # Geneva
[144.96751, -37.81739, "Australia/Melbourne"], # Melbourne
[-176.433333, -44.033333, 'Pacific/Chatham'], # Chatham Islands
]
)
def test_timezone_at__expected(latitude, longitude, expected_tz_name):
assert wx.timezone_at(latitude=longitude, longitude=latitude) == dateutil.tz.gettz(expected_tz_name)
assert wx.timezone_at(latitude=0, longitude=-175) == dateutil.tz.gettz('Etc/GMT+12')
assert wx.timezone_at(latitude=89.8, longitude=-170) == dateutil.tz.gettz('Etc/GMT+11')
|
from decimal import Decimal
from django.db.models import Q
from .models import CustomerInfo
import re
import zenhan
def ExtractNumber(org_str, data_type):
"""
引数で渡された文字列を半角に変換し、数字のみを抽出して返す。
param: org_str。例:'(0120)123-456
param: data_type。例:1=電話番号用、2=郵便番号用、3=法人番号用
return: org_strから数字のみを抽出した文字列。例:'0120123456'
"""
# 全角→半角変換
han_org_str = zenhan.z2h(org_str)
if data_type == 1: # 電話番号用
# カッコとハイフン以外を抽出
filterd_str = re.findall(r'[^\(\)\-()−]+', han_org_str)
elif data_type == 2: # 郵便番号用
# ハイフン以外を抽出
filterd_str = re.findall(r'[^\-−]+', han_org_str)
elif data_type == 3: # 法人番号用
# 法人番号は数字のみなので正規表現の抽出は行わない
filterd_str = han_org_str
# filterd_strは配列なので結合した文字列を返す
return ''.join(filterd_str)
def DecimalDefaultProc(obj):
"""
DecimalをJSONで出力可能にする
"""
if isinstance(obj, Decimal):
return float(obj)
raise TypeError
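# CheckDuplicatePhoneNumber below counts customers in the user's workspace whose
# tel_number1/2/3 matches the given number and which the user is allowed to see
# (public records, records they authored, or records shared with them or their groups).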
def CheckDuplicatePhoneNumber(phone_number, user):
"""
重複電話番号のチェックを行い、件数を返す。
"""
if not phone_number:
return 0
return CustomerInfo.objects.filter(
Q(tel_number1=phone_number) | Q(tel_number2=phone_number)
| Q(tel_number3=phone_number)).filter(
workspace=user.workspace, delete_flg='False').filter(
Q(public_status='1')
| Q(public_status='2')
| Q(author=user.email)
| Q(shared_edit_user=user)
| Q(shared_view_user=user)
| Q(shared_edit_group__in=user.my_group.all())
| Q(shared_view_group__in=user.my_group.all())).distinct(
).count()
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
def add_memory_config(cfg):
"""
Add config for Memory bank.
"""
cfg.MODEL.ROI_HEADS.TEMP_S = 48
cfg.MODEL.ROI_HEADS.MIN_CACHE = 20
cfg.MODEL.ROI_HEADS.MAX_CACHE = 60
cfg.MODEL.ROI_HEADS.RANDOM_SELECT = False
cfg.MODEL.ROI_HEADS.CACHE_CAT_FILE = "lvis0.5_rare_cats.txt"
cfg.MODEL.ROI_HEADS.CLS_LAYER = "cosine"
cfg.MODEL.ROI_HEADS.RUN = 1
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import socket
from scplib import *
from iplugin import Plugin
from icommand import *
from state import *
DEFAULT_IP = '127.0.0.1'
DEFAULT_PORT = 8888
listen_sock_dict = {}
server_sock_dict = {}
client_sock_dict = {}
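# Each dict maps a socket file descriptor to a (port, socket) tuple:
# listening sockets, accepted server-side connections and outgoing client connections.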
class CommandServerBindListen(Command):
def add_args(self):
self.get_parser().add_argument('-i', '--ip', dest = 'ip', metavar = 'ip', action = "store", default = DEFAULT_IP, help='the server ip')
self.get_parser().add_argument('-p', '--port', dest = 'port', metavar = 'port', type = int, action = "store", default = DEFAULT_PORT, help='the server port')
self.get_parser().add_argument('-n', '--nblock', action = "store_true", default = False, help='the non block')
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
addr = (ns.ip, ns.port)
listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.setblocking(True)
listen_sock.bind(addr)
listen_sock.listen(0x10)
fd = listen_sock.fileno()
listen_sock_dict[fd] = (ns.port, listen_sock)
log.debug("Server bind and listen at %s:%d [fd = %d]" %(ns.ip, ns.port, fd))
class CommandSocketList(Command):
def add_args(self):
pass
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
def dump_dict(name, d):
            keys = sorted(d.keys())
log.debug(name)
for k in keys:
log.debug("%4d :%d" %(k, d[k][0]))
dump_dict("listen socket:", listen_sock_dict)
dump_dict("server socket:", server_sock_dict)
dump_dict("client socket:", client_sock_dict)
class CommandServerAccept(Command):
def add_args(self):
self.get_parser().add_argument('fd', type = int, action = "store", help='the socket fd')
self.get_parser().add_argument('-n', '--nblock', action = "store_true", default = False, help='the non block')
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
listen_port, listen_sock = listen_sock_dict[ns.fd]
log.debug("Server accept...")
sock, addr = listen_sock.accept()
sock.setblocking(True)
fd = sock.fileno()
peer_ip, peer_port = sock.getpeername()
server_sock_dict[fd] = (peer_port, sock)
log.debug("Server accept connection from %s:%d [fd = %d]" %(peer_ip, peer_port, fd))
class CommandClientConnect(Command):
def add_args(self):
self.get_parser().add_argument('-i', '--ip', dest = 'ip', metavar = 'ip', action = "store", default = DEFAULT_IP, help='the server ip')
self.get_parser().add_argument('-p', '--port', dest = 'port', metavar = 'port', type = int, action = "store", default = DEFAULT_PORT, help='the server port')
self.get_parser().add_argument('-n', '--nblock', action = "store_true", default = False, help='the non block')
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
addr = (ns.ip, ns.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(True)
fd = sock.fileno()
sock.connect(addr)
local_ip, local_port = sock.getsockname()
client_sock_dict[fd] = (local_port, sock)
log.debug("client connect at %s:%d [fd = %d]" %(local_ip, local_port, fd))
class CommandSocketClose(Command):
def add_args(self):
self.get_parser().add_argument('fd', type = int, action = "store", help='the socket fd')
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
fd = ns.fd
if fd in listen_sock_dict:
listen_sock_dict[fd][1].close()
#del listen_sock_dict[fd]
if fd in server_sock_dict:
server_sock_dict[fd][1].close()
#del server_sock_dict[fd]
if fd in client_sock_dict:
client_sock_dict[fd][1].close()
#del client_sock_dict[fd]
class CommandSocketSend(Command):
def add_args(self):
self.get_parser().add_argument('fd', type = int, action = "store", help='the socket fd')
self.get_parser().add_argument('data', action = "store", help='the data to be send')
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
fd = ns.fd
data = b(ns.data)
sock = None
while True:
if fd in server_sock_dict:
sock = server_sock_dict[fd][1]
break
if fd in client_sock_dict:
sock = client_sock_dict[fd][1]
break
break
length = sock.send(data)
log.debug("send data length = %d" %(length))
log.debug(get_dump_string("send", data[:length]))
class CommandSocketRecv(Command):
def add_args(self):
self.get_parser().add_argument('fd', type = int, action = "store", help='the socket fd')
self.get_parser().add_argument('length', type = int, action = "store", help='the recv length')
def perform(self, argv):
ns = self.get_parser().parse_args(argv[1:])
fd = ns.fd
length = ns.length
sock = None
while True:
if fd in server_sock_dict:
sock = server_sock_dict[fd][1]
break
if fd in client_sock_dict:
sock = client_sock_dict[fd][1]
break
break
data = sock.recv(length)
log.debug("recv data length = %d" %(len(data)))
log.debug(get_dump_string("recv", data))
_cs_cmd_dict = {
"server-bind-listen": CommandServerBindListen,
"server-accept" : CommandServerAccept,
"client-connect" : CommandClientConnect,
"socket-close" : CommandSocketClose,
"socket-send" : CommandSocketSend,
"socket-recv" : CommandSocketRecv,
"socket-list" : CommandSocketList,
}
class TCPCSPlugin(Plugin):
def __init__(self):
self.__name = "cs"
self.__cmd_dict = _cs_cmd_dict
self.__var_dict = {}
def get_name(self):
return self.__name
def get_cmd_dict(self):
return self.__cmd_dict
def get_var_dict(self):
return self.__var_dict
def get_ext_matches(self):
return []
def register(plugin_dict):
plugin_object = TCPCSPlugin()
plugin_dict[plugin_object.get_name()] = plugin_object
|
from unittest import TestCase
from piccolo.columns import Varchar
from piccolo.table import create_table_class
class TestCreateTableClass(TestCase):
def test_create_table_class(self):
"""
Make sure a basic `Table` can be created successfully.
"""
_Table = create_table_class(class_name="MyTable")
self.assertEqual(_Table._meta.tablename, "my_table")
_Table = create_table_class(
class_name="MyTable", class_kwargs={"tablename": "my_table_1"}
)
self.assertEqual(_Table._meta.tablename, "my_table_1")
column = Varchar()
_Table = create_table_class(
class_name="MyTable", class_members={"name": column}
)
self.assertIn(column, _Table._meta.columns)
def test_protected_tablenames(self):
"""
Make sure that the logic around protected tablenames still works as
expected.
"""
with self.assertRaises(ValueError):
create_table_class(class_name="User")
with self.assertRaises(ValueError):
create_table_class(
class_name="MyUser", class_kwargs={"tablename": "user"}
)
# This shouldn't raise an error:
create_table_class(
class_name="User", class_kwargs={"tablename": "my_user"}
)
|
import py_midicsv as pm
import numpy as np
import pandas as pd
import librosa
import matplotlib.pyplot as plt
from PIL import Image
import warnings
class Generated_Song:
def __init__(self, song_length=10000):
'''Break data into metadata, midi data, and end track data'''
self.meta_data = ['0, 0, Header, 0, 1, 96\n', '1, 0, Start_track\n', '1, 0, Title_t, "1 1-Keyzone Classic\\000"\n', '1, 0, Time_signature, 4, 2, 36, 8\n', '1, 0, Time_signature, 4, 2, 36, 8\n']
self.track_end = [f'1, {song_length}, End_track\n', '0, 0, End_of_file']
self.midi_data = []
'''Initialize and populate Numpy matrices for representing MIDI actions'''
self.song_length = song_length
self.midi_note_array = np.zeros((128, self.song_length))
self.midi_df = pd.DataFrame(columns=['track', 'tick', 'control', 'channel', 'control_num', 'velocity'])
def add_note(self, note_start, note_value, note_length, velocity):
self.midi_df.loc[len(self.midi_df.index)] = [1, note_start, 'Note_on_c', 0, note_value, velocity]
self.midi_df.loc[len(self.midi_df.index)] = [1, note_start+note_length, 'Note_off_c', 0, note_value, velocity]
# self.populate_midi_note_array()
def add_randomized_notes(self, num_notes=100, time='uniform', note_value='normal', note_length='uniform', velocity='normal'):
note_starts, note_vals, note_lens, note_vels = None, None, None, None
if time == 'uniform':
note_starts = np.random.uniform(low=0, high=self.song_length, size=num_notes)
if note_value == 'normal':
note_vals = np.random.normal(loc=64, scale=21, size=num_notes)
elif note_value == 'uniform':
note_vals = np.random.uniform(low=0, high=127, size=num_notes)
if note_length == 'uniform':
note_lens = np.random.uniform(low=10, high=1000, size=num_notes)
elif note_length == 'normal':
note_lens = np.random.normal(loc=100, scale=300, size=num_notes)
if velocity == 'normal':
note_vels = np.random.normal(loc=64, scale=21, size=num_notes)
note_starts = np.round(note_starts)
note_vals = np.round(note_vals)
note_lens = np.round(note_lens)
note_vels = np.round(note_vels)
note_vals[note_vals > 127] = 127
note_vals[note_vals < 0] = 0
note_lens[note_lens < 10] = 10
note_vels[note_vels > 127] = 127
note_vels[note_vels < 10] = 10
midi_values = np.column_stack((note_starts, note_vals, note_lens, note_vels))
print(midi_values)
# note_start, note_value, note_length, velocity
for row in midi_values:
self.add_note(int(row[0]), int(row[1]), int(row[2]), int(row[3]))
print(row)
def sort_midi_df(self):
self.midi_df.sort_values(by='tick', inplace=True)
def export_midi(self, path_out):
midi_out = []
for line in self.meta_data:
midi_out.append(line)
self.sort_midi_df()
df = self.midi_df.copy()
cols = df.columns
df['midi_string'] = df[cols].apply(lambda row: ', '.join(row.values.astype(str)) + '\n', axis=1)
midi_strings = df['midi_string'].tolist()
midi_out += midi_strings
midi_out += self.track_end
midi_object = pm.csv_to_midi(midi_out)
with open(path_out, 'wb') as output_file:
midi_writer = pm.FileWriter(output_file)
midi_writer.write(midi_object)
song = Generated_Song()
print(song.midi_df)
song.sort_midi_df()
print(song.midi_df)
song.add_randomized_notes()
song.export_midi('../data/testmidi.mid')
 |
from inspect import stack, getframeinfo,getsource
from colorama import Fore,init
from datetime import datetime
# Before reading the code you should know:
# -> getframeinfo(stack()[1][0]) returns information about the calling code line,
# -> which is how we can produce debug output for a specific part of the program
white=Fore.LIGHTWHITE_EX
green=Fore.GREEN
red=Fore.RED
reset=Fore.RESET
init()
class pyChocolate:
def File(self,frame,kwargs):
return f"{white} file :{frame.filename}" if ifEqual(kwargs,("file",True)) else ""
def Code(self,frame,color,kwargs):
return f"{white} code: {color}{frame.code_context[0].strip()}{reset}" if ifEqual(kwargs,("code",True)) else ""
def Info(self,frame,output,kwargs):
return f"{white}[Line-{frame.lineno}] ->({green+firstValue(frame.code_context[0].strip())+white}) { green}{pretifyOutput(output)} {self.Code(frame,green,kwargs)} {self.File(frame,kwargs)}"
def Warn(self,frame,output,kwargs):
return f"{white}[Line-{frame.lineno}] { red}{pretifyOutput(output)} {self.Code(frame,red,kwargs)} {self.File(frame,kwargs)}"
    def LOG(self,frame,output:"debugging content",kwargs)->"return given output":
print(self.Warn(frame,output,kwargs) if ifEqual(kwargs,("mode","warn")) else self.Info(frame,output,kwargs),reset)
return output
def Catch(self,frame,tryFunc,runFunc):
arg1,arg2=tryFunc[1],runFunc[1]
name1,name2=str(tryFunc[0]).split(" ")[1],str(runFunc[0]).split(" ")[1]
string=f"{white}[Line-{frame.lineno}]->(Catch) Func:{ green}{{0}} {white}args:({{1}}{white}){ green} {white} return:{ green}{{2}} {reset}"
try:
rv=tryFunc[0](*arg1)
args=colorfulArgs(arg1)
print(string.format(name1,args,pretifyOutput(rv)))
except Exception as func1err:
try:
rv=runFunc[0](*arg2)
args=colorfulArgs(arg2)
print(string.format(name2,args,pretifyOutput(rv)))
print(white+f"[Catched]->({green+name1+white})({colorfulArgs(arg1)+white}) "+str(func1err)+reset)
print(getsource(tryFunc[0]))
except Exception as func2err:
print(f"{white}[Line-{frame.lineno}]->({ Fore.LIGHTRED_EX}Catch{white}) { red}'error on both functions' {white}[{ red}{name1}{white},{ red}{name2}{white}]{ reset}")
print(white+f"[Catched]->({green+name1+white})({colorfulArgs(arg1)+white}) "+str(func1err)+reset)
print(getsource(tryFunc[0]))
print(white+f"[Catched]->({green+name2+white})({colorfulArgs(arg2)+white}) "+str(func2err)+reset)
print(getsource(runFunc[0]))
return [func1err,func2err]
return rv
def put(self,text):
date=datetime.now().strftime("%H:%M:%S")
print(white+f"[{date}] "+text+reset)
#-----------ChocolateFuncs----------
def ifEqual(kwargs,tuple_):
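    # True when the given (key, value) pair appears in kwargs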
return True if tuple_ in list(kwargs.items()) else False
def multiSplit(string,args):
for arg in args:
string=string.replace(arg,args[0])
return string.split(args[0])
def getLog(code):
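    # Extract the argument text of the Log(...) call from the caller's source line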
x=multiSplit(code,["(",")"])
try:
i=x.index("Log")
except:
for s in x:
if "Log" in s:
i=x.index(s)
return x[i+1:len(x)-i-1]
def firstValue(code):
code=getLog(code)
end=""
if len(code)>1:
return code[0]+white+")("+green+"".join(code[1])
rv=" ".join(code).split(",")[0]
if rv[0]=="[" or rv[0]=="{" or rv[0]=="(" or rv[0]=='"':
p={"[":"]","{":"}","(":")",'"':'"'}
end="..."+p[rv[0]]
if rv[0]=='"' and rv.endswith('"'):
end=""
if rv[0]=='{' and rv.endswith('}'):
end=""
if rv[0]=='[' and rv.endswith(']'):
end=""
return rv+end
def colorfulArgs(arg):
return ','.join([ green+str(i)+reset if type(i)!=str else green+'"'+str(i)+'"'+reset for i in arg])
def colorfulDicts(output,indent,innerIndent=False):
innerIndent=indent if innerIndent==True else 0
def colorize():
rv=white+"{\n"
for i in list(output.items()):
rv+=f'{indent*" "} {green}"{i[0]}"{white}:'
if isinstance(i[1], dict):
rv+=colorfulDicts(i[1],indent+2,True)+(indent+2)*" "+"\n"
elif isinstance(i[1], str):
rv+=f'{green}"{i[1]}"{reset},\n'
elif isinstance(i[1],list):
rv+=f"{white}[{colorfulArgs(i[1])}{white}]\n"
else:
rv+=f'{i[1]},\n'
return rv
comma="," if innerIndent else ""
return f"{green}"+colorize()+white+(innerIndent*" ")+"}"+comma
def pretifyOutput(output):
if type(output)==str:
return f'"{output}"'
elif type(output)==dict:
return f"{white}rv={green}Dict\n"+colorfulDicts(output,4)+"\n"
elif type(output)==list:
return white+"["+colorfulArgs(output)+white+"]"
else:
return output
#-----------exporting---------------
Chocolate=pyChocolate()
def Log(output:"debugging content",**kwargs)->"return given output":
return Chocolate.LOG(getframeinfo(stack()[1][0]),output,kwargs)
def Catch(tryFunc:"function",runFunc:"function")->"return given output":
return Chocolate.Catch(getframeinfo(stack()[1][0]),tryFunc,runFunc)
def put(text):
Chocolate.put(text)
#-------------Done------------------
|
# sqlite3: DB-API 2.0 interface for SQLite databases
# SQLite needs no separate server process and uses a nonstandard variant of the SQL query language.
# It is a C library that provides a lightweight disk-based database.
# Some applications can use SQLite for internal data storage.
# For details, see "sqlite3 — DB-API 2.0 interface for SQLite databases" in the official Python documentation.
# Python sqlite3 documentation link: https://docs.python.org/ko/3/library/sqlite3.html
import sqlite3
# Create (or open) the database.
con = sqlite3.connect('example.db')
# Get a cursor object.
cur = con.cursor()
# Create the 'stocks' table.
cur.execute('''CREATE TABLE stocks
(date text, trans text, symbol text, qty real, price real)
''')
# Insert a row of data into the 'stocks' table.
cur.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
# Save the data with 'commit'.
con.commit()
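# As a minimal illustration (not part of the original example), the inserted row
# can be read back with a SELECT query before closing the connection:
for row in cur.execute("SELECT * FROM stocks ORDER BY price"):
    print(row)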
# When you are done with the database, close the connection with the 'close' method.
# Before doing so, make sure all changes have been committed;
# otherwise they will be lost when 'close' is called.
con.close()
|
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QSlider, QWidget, QLabel, QPushButton
from src.common.QssHelper import QssHelper
class Style(object):
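    """Static helpers that apply the player's Qt style sheets (footer, music card, ...)."""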
@staticmethod
def init_footer_style(slider_volume: QSlider, footer: QWidget, volume: int, btn_zoom: QPushButton, width: int,
height: int):
# ------------------ footer ------------------ #
slider_volume.setValue(volume)
footer.setStyleSheet(QssHelper.load("/main/footer.css"))
btn_zoom.setGeometry(width - 18, height - 18, 14, 14)
btn_zoom.setStyleSheet("QPushButton{border-image:url(./resource/image/缩放.png)}")
btn_zoom.setCursor(Qt.SizeFDiagCursor)
@staticmethod
def init_music_card_style(music_info_widget: QWidget, btn_music_image: QPushButton, music_image_label: QLabel):
music_info_widget.setStyleSheet(
"QWidget#music_info_widget{background-color:#f5f5f7;border:none;border-right:1px solid #e1e1e2;}")
music_info_widget.setCursor(Qt.PointingHandCursor)
btn_music_image.setIconSize(QSize(44, 44))
btn_music_image.setAutoFillBackground(True)
music_image_label.setStyleSheet("QLabel{background-color: rgba(71, 71, 71, 150)}")
music_image_label.setPixmap(QPixmap("./resource/image/全屏.png"))
music_image_label.hide()
|
# -*- coding: utf-8 -*-
# Copyright 2009-2017 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all runners."""
import copy
import datetime
import getpass
import logging
import os
import os.path
import posixpath
import pprint
import re
import shutil
import sys
import tarfile
import tempfile
import mrjob.step
from mrjob.compat import translate_jobconf
from mrjob.compat import translate_jobconf_dict
from mrjob.compat import translate_jobconf_for_all_versions
from mrjob.conf import combine_dicts
from mrjob.conf import combine_opts
from mrjob.conf import load_opts_from_mrjob_confs
from mrjob.fs.composite import CompositeFilesystem
from mrjob.fs.local import LocalFilesystem
from mrjob.options import _combiners
from mrjob.options import _deprecated_aliases
from mrjob.options import CLEANUP_CHOICES
from mrjob.parse import is_uri
from mrjob.py2 import PY2
from mrjob.py2 import string_types
from mrjob.setup import WorkingDirManager
from mrjob.setup import name_uniquely
from mrjob.setup import parse_legacy_hash_path
from mrjob.setup import parse_setup_cmd
from mrjob.step import _is_spark_step_type
from mrjob.util import to_lines
from mrjob.util import zip_dir
log = logging.getLogger(__name__)
# use to detect globs and break into the part before and after the glob
GLOB_RE = re.compile(r'^(.*?)([\[\*\?].*)$')
# buffer for piping files into sort on Windows
_BUFFER_SIZE = 4096
# jobconf options for implementing SORT_VALUES
_SORT_VALUES_JOBCONF = {
'mapreduce.partition.keypartitioner.options': '-k1,1',
'stream.num.map.output.key.fields': 2
}
# partitioner for sort_values
_SORT_VALUES_PARTITIONER = \
'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner'
class MRJobRunner(object):
"""Abstract base class for all runners"""
# this class handles the basic runner framework, options and config files,
# arguments to mrjobs, and setting up job working dirs and environments.
# this will put files from setup scripts, py_files, and bootstrap_mrjob
# into the job's working dir, but won't actually run/import them
#
# command lines to run substeps (including Spark) are handled by
# mrjob.bin.MRJobBinRunner
    #: alias for this runner; used for picking the section of
    #: :py:mod:`mrjob.conf` to load; one of ``'local'``, ``'emr'``,
#: or ``'hadoop'``
alias = None
# libjars is only here because the job can set it; might want to
# handle this with a warning from the launcher instead
OPT_NAMES = {
'bootstrap_mrjob',
'check_input_paths',
'cleanup',
'cleanup_on_failure',
'cmdenv',
'jobconf',
'label',
'libjars',
'local_tmp_dir',
'owner',
'py_files',
'setup',
'upload_archives',
'upload_dirs',
'upload_files'
}
# if this is true, when bootstrap_mrjob is true, add it through the
# setup script
_BOOTSTRAP_MRJOB_IN_SETUP = True
### methods to call from your batch script ###
def __init__(self, mr_job_script=None, conf_paths=None,
extra_args=None, file_upload_args=None,
hadoop_input_format=None, hadoop_output_format=None,
input_paths=None, output_dir=None, partitioner=None,
sort_values=None, stdin=None, step_output_dir=None,
**opts):
"""All runners take the following keyword arguments:
:type mr_job_script: str
:param mr_job_script: the path of the ``.py`` file containing the
:py:class:`~mrjob.job.MRJob`. If this is None,
you won't actually be able to :py:meth:`run` the
job, but other utilities (e.g. :py:meth:`ls`)
will work.
:type conf_paths: None or list
:param conf_paths: List of config files to combine and use, or None to
search for mrjob.conf in the default locations.
:type extra_args: list of str
:param extra_args: a list of extra cmd-line arguments to pass to the
mr_job script. This is a hook to allow jobs to take
additional arguments.
:param file_upload_args: a list of tuples of ``('--ARGNAME', path)``.
The file at the given path will be uploaded
to the local directory of the mr_job script
when it runs, and then passed into the script
with ``--ARGNAME``. Useful for passing in
SQLite DBs and other configuration files to
your job.
:type hadoop_input_format: str
:param hadoop_input_format: name of an optional Hadoop ``InputFormat``
class. Passed to Hadoop along with your
first step with the ``-inputformat``
option. Note that if you write your own
class, you'll need to include it in your
own custom streaming jar (see
:mrjob-opt:`hadoop_streaming_jar`).
:type hadoop_output_format: str
:param hadoop_output_format: name of an optional Hadoop
``OutputFormat`` class. Passed to Hadoop
along with your first step with the
``-outputformat`` option. Note that if you
write your own class, you'll need to
include it in your own custom streaming
jar (see
:mrjob-opt:`hadoop_streaming_jar`).
:type input_paths: list of str
:param input_paths: Input files for your job. Supports globs and
recursively walks directories (e.g.
``['data/common/', 'data/training/*.gz']``). If
this is left blank, we'll read from stdin
:type output_dir: str
:param output_dir: An empty/non-existent directory where Hadoop
should put the final output from the job.
If you don't specify an output directory, we'll
output into a subdirectory of this job's temporary
directory. You can control this from the command
line with ``--output-dir``. This option cannot be
set from configuration files. If used with the
hadoop runner, this path does not need to be fully
qualified with ``hdfs://`` URIs because it's
understood that it has to be on HDFS.
:type partitioner: str
:param partitioner: Optional name of a Hadoop partitioner class, e.g.
``'org.apache.hadoop.mapred.lib.HashPartitioner'``.
Hadoop streaming will use this to determine how
mapper output should be sorted and distributed
to reducers.
:type sort_values: bool
:param sort_values: if true, set partitioners and jobconf variables
                            so that reducers receive the values
associated with any key in sorted order (sorted by
their *encoded* value). Also known as secondary
sort.
:param stdin: an iterable (can be a ``BytesIO`` or even a list) to use
as stdin. This is a hook for testing; if you set
``stdin`` via :py:meth:`~mrjob.job.MRJob.sandbox`, it'll
get passed through to the runner. If for some reason
your lines are missing newlines, we'll add them;
this makes it easier to write automated tests.
:type step_output_dir: str
:param step_output_dir: An empty/non-existent directory where Hadoop
should put output from all steps other than
the last one (this only matters for multi-step
jobs). Currently ignored by local runners.
"""
self._ran_job = False
# opts are made from:
#
# empty defaults (everything set to None)
# runner-specific defaults
# opts from config file(s)
# opts from command line
self._opts = self._combine_confs(
[(None, {key: None for key in self.OPT_NAMES})] +
[(None, self._default_opts())] +
load_opts_from_mrjob_confs(self.alias, conf_paths) +
[('the command line', opts)]
)
log.debug('Active configuration:')
log.debug(pprint.pformat({
opt_key: self._obfuscate_opt(opt_key, opt_value)
for opt_key, opt_value in self._opts.items()
}))
self._fs = None
# a local tmp directory that will be cleaned up when we're done
# access/make this using self._get_local_tmp_dir()
self._local_tmp_dir = None
self._working_dir_mgr = WorkingDirManager()
# mapping from dir to path for corresponding archive. we pick
# paths during init(), but don't actually create the archives
# until self._create_dir_archives() is called
self._dir_to_archive_path = {}
# dir archive names (the filename minus ".tar.gz") already taken
self._dir_archive_names_taken = set()
# set of dir_archives that have actually been created
self._dir_archives_created = set()
# track (name, path) of files and archives to upload to spark.
# these are a subset of those in self._working_dir_mgr
self._spark_files = []
self._spark_archives = []
self._upload_mgr = None # define in subclasses that use this
self._script_path = mr_job_script
if self._script_path:
self._working_dir_mgr.add('file', self._script_path)
# give this job a unique name
self._job_key = self._make_unique_job_key(
label=self._opts['label'], owner=self._opts['owner'])
# extra args to our job
self._extra_args = list(extra_args) if extra_args else []
for extra_arg in self._extra_args:
if isinstance(extra_arg, dict):
if extra_arg.get('type') != 'file':
raise NotImplementedError
self._working_dir_mgr.add(**extra_arg)
self._spark_files.append(
(extra_arg['name'], extra_arg['path']))
# extra file arguments to our job
if file_upload_args:
log.warning('file_upload_args is deprecated and will be removed'
' in v0.6.0. Pass dicts to extra_args instead.')
for arg, path in file_upload_args:
arg_file = parse_legacy_hash_path('file', path)
self._working_dir_mgr.add(**arg_file)
self._extra_args.extend([arg, arg_file])
self._spark_files.append((arg_file['name'], arg_file['path']))
# set up uploading
for hash_path in self._opts['upload_files']:
uf = parse_legacy_hash_path('file', hash_path,
must_name='upload_files')
self._working_dir_mgr.add(**uf)
self._spark_files.append((uf['name'], uf['path']))
for hash_path in self._opts['upload_archives']:
ua = parse_legacy_hash_path('archive', hash_path,
must_name='upload_archives')
self._working_dir_mgr.add(**ua)
self._spark_archives.append((ua['name'], ua['path']))
for hash_path in self._opts['upload_dirs']:
# pick name based on directory path
ud = parse_legacy_hash_path('dir', hash_path,
must_name='upload_archives')
# but feed working_dir_mgr the archive's path
archive_path = self._dir_archive_path(ud['path'])
self._working_dir_mgr.add(
'archive', archive_path, name=ud['name'])
self._spark_archives.append((ud['name'], archive_path))
# py_files
# self._setup is a list of shell commands with path dicts
# interleaved; see mrjob.setup.parse_setup_cmd() for details
self._setup = self._parse_setup_and_py_files()
for cmd in self._setup:
for token in cmd:
if isinstance(token, dict):
# convert dir archives tokens to archives
if token['type'] == 'dir':
# feed the archive's path to self._working_dir_mgr
token['path'] = self._dir_archive_path(token['path'])
token['type'] = 'archive'
self._working_dir_mgr.add(**token)
# Where to read input from (log files, etc.)
self._input_paths = input_paths or ['-'] # by default read from stdin
if PY2:
self._stdin = stdin or sys.stdin
else:
self._stdin = stdin or sys.stdin.buffer
self._stdin_path = None # temp file containing dump from stdin
# where a zip file of the mrjob library is stored locally
self._mrjob_zip_path = None
# store output_dir
self._output_dir = output_dir
# store partitioner
self._partitioner = partitioner
# store sort_values
self._sort_values = sort_values
# store step_output_dir
self._step_output_dir = step_output_dir
# store hadoop input and output formats
self._hadoop_input_format = hadoop_input_format
self._hadoop_output_format = hadoop_output_format
# A cache for self._get_steps(); also useful as a test hook
self._steps = None
# this variable marks whether a cleanup has happened and this runner's
# output stream is no longer available.
self._closed = False
### Options ####
def _default_opts(self):
try:
owner = getpass.getuser()
except:
owner = None
return dict(
check_input_paths=True,
cleanup=['ALL'],
cleanup_on_failure=['NONE'],
local_tmp_dir=tempfile.gettempdir(),
owner=owner,
)
def _combine_confs(self, source_and_opt_list):
"""Combine several opt dictionaries into one.
*source_and_opt_list* is a list of tuples of *source*,
*opts* where *opts* is a dictionary and *source* is either
None or a description of where the opts came from (usually a path).
Only override this if you need truly fine-grained control,
including knowledge of the options' source.
"""
opt_list = [
self._fix_opts(opts, source)
for source, opts in source_and_opt_list
]
return self._combine_opts(opt_list)
def _combine_opts(self, opt_list):
"""Combine several opt dictionaries into one. *opt_list*
is a list of dictionaries containing validated options
Override this if you need to base options off the values of
other options, but don't need to issue warnings etc.
about the options' source.
"""
return combine_opts(self._opt_combiners(), *opt_list)
def _opt_combiners(self):
"""A dictionary mapping opt name to combiner funciton. This
won't necessarily include every opt name (we default to
:py:func:`~mrjob.conf.combine_value`).
"""
return _combiners(self.OPT_NAMES)
def _fix_opts(self, opts, source=None):
"""Take an options dictionary, and either return a sanitized
version of it, or raise an exception.
*source* is either a string describing where the opts came from
or None.
This ensures that opt dictionaries are really dictionaries
and handles deprecated options.
"""
if source is None:
source = 'defaults' # defaults shouldn't trigger warnings
if not isinstance(opts, dict):
raise TypeError(
'options for %s (from %s) must be a dict' %
                (self.runner_alias, source))
deprecated_aliases = _deprecated_aliases(self.OPT_NAMES)
results = {}
for k, v in sorted(opts.items()):
# rewrite deprecated aliases
if k in deprecated_aliases:
if v is None: # don't care
continue
                aliased_opt = deprecated_aliases[k]
log.warning('Deprecated option %s (from %s) has been renamed'
' to %s and will be removed in v0.7.0' % (
k, source, aliased_opt))
if opts.get(aliased_opt) is not None:
                    continue  # don't overwrite non-aliased opt
k = aliased_opt
if k in self.OPT_NAMES:
results[k] = None if v is None else self._fix_opt(k, v, source)
else:
log.warning('Unexpected option %s (from %s)' % (k, source))
return results
def _fix_opt(self, opt_key, opt_value, source):
"""Fix a single option, returning its correct value or raising
an exception. This is not called for options that are ``None``.
This currently handles cleanup opts.
Override this if you require additional opt validation or cleanup.
"""
if opt_key in ('cleanup', 'cleanup_on_failure'):
return self._fix_cleanup_opt(opt_key, opt_value, source)
else:
return opt_value
def _fix_cleanup_opt(self, opt_key, opt_value, source):
"""Fix a cleanup option, or raise ValueError."""
if isinstance(opt_value, string_types):
opt_value = [opt_value]
if 'NONE' in opt_value and len(set(opt_value)) > 1:
raise ValueError(
'Cannot clean up both nothing and something!'
' (%s option from %s)' % (opt_key, source))
for cleanup_type in opt_value:
if cleanup_type not in CLEANUP_CHOICES:
raise ValueError(
'%s must be one of %s, not %s (from %s)' % (
opt_key, ', '.join(CLEANUP_CHOICES), opt_value,
source))
return opt_value
def _obfuscate_opt(self, opt_key, opt_value):
"""Return value of opt to show in debug printout. Used to obfuscate
credentials, etc."""
return opt_value
### Filesystem object ###
@property
def fs(self):
""":py:class:`~mrjob.fs.base.Filesystem` object for the local
filesystem.
"""
if self._fs is None:
# wrap LocalFilesystem in CompositeFilesystem to get IOError
# on URIs (see #1185)
self._fs = CompositeFilesystem(LocalFilesystem())
return self._fs
### Running the job and parsing output ###
def run(self):
"""Run the job, and block until it finishes.
Raise :py:class:`~mrjob.step.StepFailedException` if there
are any problems (except on
:py:class:`~mrjob.inline.InlineMRJobRunner`, where we raise the
actual exception that caused the step to fail).
"""
if not self._script_path:
raise AssertionError("No script to run!")
if self._ran_job:
raise AssertionError("Job already ran!")
self._create_dir_archives()
self._check_input_paths()
self._run()
self._ran_job = True
def cat_output(self):
"""Stream the jobs output, as a stream of ``bytes``. If there are
multiple output files, there will be an empty bytestring
(``b''``) between them.
.. versionadded:: 0.6.0
In previous versions, you'd use :py:meth:`stream_output`.
"""
output_dir = self.get_output_dir()
if output_dir is None:
raise AssertionError('Run the job before streaming output')
if self._closed is True:
log.warning(
'WARNING! Trying to stream output from a closed runner, output'
' will probably be empty.')
log.info('Streaming final output from %s...' % output_dir)
def split_path(path):
while True:
base, name = os.path.split(path)
# no more elements
if not name:
break
yield name
path = base
def ls_output():
for filename in self.fs.ls(output_dir):
subpath = filename[len(output_dir):]
if not (any(name.startswith('_')
for name in split_path(subpath))):
yield filename
for i, filename in enumerate(ls_output()):
if i > 0:
yield b'' # EOF of previous file
for chunk in self.fs._cat_file(filename):
yield chunk
def stream_output(self):
"""Like :py:meth:`cat_output` except that it groups bytes into
        lines. Equivalent to ``mrjob.util.to_lines(runner.cat_output())``.
.. deprecated:: 0.6.0
"""
log.warning('stream_output() is deprecated and will be removed in'
' v0.7.0. use mrjob.util.to_lines(runner.cat_output())'
' instead.')
return to_lines(self.cat_output())
def _cleanup_mode(self, mode=None):
"""Actual cleanup action to take based on various options"""
if self._script_path and not self._ran_job:
return mode or self._opts['cleanup_on_failure']
else:
return mode or self._opts['cleanup']
def _cleanup_cloud_tmp(self):
"""Cleanup any files/directories on cloud storage (e.g. S3) we created
while running this job. Should be safe to run this at any time, or
multiple times.
"""
pass # only EMR runner does this
def _cleanup_hadoop_tmp(self):
"""Cleanup any files/directories on HDFS we created
while running this job. Should be safe to run this at any time, or
multiple times.
"""
pass # only Hadoop runner does this
def _cleanup_local_tmp(self):
"""Cleanup any files/directories on the local machine we created while
running this job. Should be safe to run this at any time, or multiple
times.
        This particular function removes the local tmp directory
        referenced by self._local_tmp_dir.
This won't remove output_dir if it's outside of our tmp dir.
"""
if self._local_tmp_dir:
log.info('Removing temp directory %s...' % self._local_tmp_dir)
try:
shutil.rmtree(self._local_tmp_dir)
except OSError as e:
log.exception(e)
self._local_tmp_dir = None
def _cleanup_cluster(self):
"""Terminate the cluster if there is one."""
pass # this only happens on EMR
def _cleanup_logs(self):
"""Cleanup any log files that are created as a side-effect of the job.
"""
pass # this only happens on EMR
def _cleanup_job(self):
"""Stop any jobs that we created that are still running."""
pass # currently disabled (see #1241)
def cleanup(self, mode=None):
"""Clean up running jobs, temp files, and logs, subject to the
*cleanup* option passed to the constructor.
If you create your runner in a :keyword:`with` block,
:py:meth:`cleanup` will be called automatically::
with mr_job.make_runner() as runner:
...
# cleanup() called automatically here
:param mode: override *cleanup* passed into the constructor. Should be
a list of strings from :py:data:`CLEANUP_CHOICES`
"""
mode = self._cleanup_mode(mode)
def mode_has(*args):
return any((choice in mode) for choice in args)
if self._script_path and not self._ran_job:
if mode_has('CLUSTER', 'ALL'):
self._cleanup_cluster()
if mode_has('JOB', 'ALL'):
self._cleanup_job()
if mode_has('ALL', 'TMP', 'CLOUD_TMP'):
self._cleanup_cloud_tmp()
if mode_has('ALL', 'TMP', 'HADOOP_TMP'):
self._cleanup_hadoop_tmp()
if mode_has('ALL', 'TMP', 'LOCAL_TMP'):
self._cleanup_local_tmp()
if mode_has('ALL', 'LOGS'):
self._cleanup_logs()
self._closed = True
def counters(self):
"""Get counters associated with this run in this form::
[{'group name': {'counter1': 1, 'counter2': 2}},
{'group name': ...}]
The list contains an entry for every step of the current job.
"""
raise NotImplementedError
### hooks for the with statement ###
def __enter__(self):
"""Don't do anything special at start of with block"""
return self
def __exit__(self, type, value, traceback):
"""Call self.cleanup() at end of with block."""
self.cleanup()
### more runner information ###
def get_opts(self):
"""Get options set for this runner, as a dict."""
log.warning('get_opts() is deprecated and will be removed in v0.7.0')
return copy.deepcopy(self._opts)
def get_job_key(self):
"""Get the unique key for the job run by this runner.
This has the format ``label.owner.date.time.microseconds``
"""
return self._job_key
def get_output_dir(self):
"""Find the directory containing the job output. If the job hasn't
run yet, returns None"""
if self._script_path and not self._ran_job:
return None
return self._output_dir
### other methods you need to implement in your subclass ###
def get_hadoop_version(self):
"""Return the version number of the Hadoop environment as a string if
Hadoop is being used or simulated. Return None if not applicable.
:py:class:`~mrjob.emr.EMRJobRunner` infers this from the cluster.
:py:class:`~mrjob.hadoop.HadoopJobRunner` gets this from
``hadoop version``. :py:class:`~mrjob.local.LocalMRJobRunner` has an
additional `hadoop_version` option to specify which version it
simulates.
:py:class:`~mrjob.inline.InlineMRJobRunner` does not simulate Hadoop at
all.
"""
return None
    # you'll probably want to add your own __init__() and cleanup() as well
def _run(self):
"""Run the job."""
raise NotImplementedError
### internal utilities for implementing MRJobRunners ###
def _get_local_tmp_dir(self):
"""Create a tmp directory on the local filesystem that will be
cleaned up by self.cleanup()"""
if not self._local_tmp_dir:
path = os.path.join(self._opts['local_tmp_dir'], self._job_key)
log.info('Creating temp directory %s' % path)
if os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path)
self._local_tmp_dir = path
return self._local_tmp_dir
def _make_unique_job_key(self, label=None, owner=None):
"""Come up with a useful unique ID for this job.
We use this to choose the output directory, etc. for the job.
"""
# use the name of the script if one wasn't explicitly
# specified
if not label:
if self._script_path:
label = os.path.basename(self._script_path).split('.')[0]
else:
label = 'no_script'
if not owner:
owner = 'no_user'
now = datetime.datetime.utcnow()
return '%s.%s.%s.%06d' % (
label, owner,
now.strftime('%Y%m%d.%H%M%S'), now.microsecond)
def _get_steps(self):
"""Call the job script to find out how many steps it has, and whether
there are mappers and reducers for each step. Validate its
output.
Returns output as described in :ref:`steps-format`.
Results are cached, so call this as many times as you want.
"""
if self._steps is None:
self._steps = self._load_steps()
return self._steps
def _load_steps(self):
"""Ask job how many steps it has, and whether
there are mappers and reducers for each step.
Returns output as described in :ref:`steps-format`.
"""
raise NotImplementedError
def _get_step(self, step_num):
"""Get a single step (calls :py:meth:`_get_steps`)."""
return self._get_steps()[step_num]
def _num_steps(self):
"""Get the number of steps (calls :py:meth:`get_steps`)."""
return len(self._get_steps())
def _has_streaming_steps(self):
"""Are any of our steps Hadoop streaming steps?"""
return any(step['type'] == 'streaming'
for step in self._get_steps())
def _has_spark_steps(self):
"""Are any of our steps Spark steps (either spark or spark_script)"""
return any(_is_spark_step_type(step['type'])
for step in self._get_steps())
def _args_for_task(self, step_num, mrc):
return [
'--step-num=%d' % step_num,
'--%s' % mrc,
] + self._mr_job_extra_args()
def _mr_job_extra_args(self, local=False):
"""Return arguments to add to every invocation of MRJob.
:type local: boolean
:param local: if this is True, use files' local paths rather than
the path they'll have inside Hadoop streaming
"""
result = []
for extra_arg in self._extra_args:
if isinstance(extra_arg, dict):
if local:
result.append(extra_arg['path'])
else:
result.append(self._working_dir_mgr.name(**extra_arg))
else:
result.append(extra_arg)
return result
def _dir_archive_path(self, dir_path):
"""Assign a path for the archive of *dir_path* but don't
actually create anything."""
if dir_path not in self._dir_to_archive_path:
# we can check local paths now
if not (is_uri(dir_path) or os.path.isdir(dir_path)):
raise OSError('%s is not a directory!' % dir_path)
name = name_uniquely(
dir_path, names_taken=self._dir_archive_names_taken)
self._dir_archive_names_taken.add(name)
self._dir_to_archive_path[dir_path] = os.path.join(
self._get_local_tmp_dir(), 'archives', name + '.tar.gz')
return self._dir_to_archive_path[dir_path]
def _create_dir_archives(self):
"""Call this to create all dir archives"""
for dir_path in sorted(set(self._dir_to_archive_path)):
self._create_dir_archive(dir_path)
def _create_dir_archive(self, dir_path):
"""Helper for :py:meth:`archive_dir`"""
if not self.fs.exists(dir_path):
            raise OSError('%s does not exist' % dir_path)
tar_gz_path = self._dir_archive_path(dir_path)
if tar_gz_path in self._dir_archives_created:
return # already created
if not os.path.isdir(os.path.dirname(tar_gz_path)):
os.makedirs(os.path.dirname(tar_gz_path))
# for remote files
tmp_download_path = os.path.join(
self._get_local_tmp_dir(), 'tmp-download')
log.info('Archiving %s -> %s' % (dir_path, tar_gz_path))
with tarfile.open(tar_gz_path, mode='w:gz') as tar_gz:
for path in self.fs.ls(dir_path):
# fs.ls() only lists files
if path == dir_path:
raise OSError('%s is a file, not a directory!' % dir_path)
# TODO: do we need this?
if os.path.realpath(path) == os.path.realpath(tar_gz_path):
raise OSError(
'attempted to archive %s into itself!' % tar_gz_path)
if is_uri(path):
path_in_tar_gz = path[len(dir_path):].lstrip('/')
log.info(' downloading %s -> %s' % (
path, tmp_download_path))
with open(tmp_download_path, 'wb') as f:
for chunk in self.fs.cat(path):
f.write(chunk)
local_path = tmp_download_path
else:
path_in_tar_gz = path[len(dir_path):].lstrip(os.sep)
local_path = path
log.debug(' adding %s to %s' % (path, tar_gz_path))
tar_gz.add(local_path, path_in_tar_gz, recursive=False)
self._dir_archives_created.add(tar_gz_path)
def _bootstrap_mrjob(self):
"""Should we bootstrap mrjob?"""
if self._opts['bootstrap_mrjob'] is None:
return self._opts['interpreter'] is None
else:
return bool(self._opts['bootstrap_mrjob'])
def _get_input_paths(self):
"""Get the paths to input files, dumping STDIN to a local
file if need be."""
if '-' in self._input_paths:
if self._stdin_path is None:
# prompt user, so they don't think the process has stalled
log.info('reading from STDIN')
stdin_path = os.path.join(self._get_local_tmp_dir(), 'STDIN')
log.debug('dumping stdin to local file %s' % stdin_path)
with open(stdin_path, 'wb') as stdin_file:
for line in self._stdin:
# catch missing newlines (often happens with test data)
if not line.endswith(b'\n'):
line += b'\n'
stdin_file.write(line)
self._stdin_path = stdin_path
return [self._stdin_path if p == '-' else p for p in self._input_paths]
def _check_input_paths(self):
"""Check that input exists prior to running the job, if the
`check_input_paths` option is true."""
if not self._opts['check_input_paths']:
return
for path in self._input_paths:
if path == '-':
continue # STDIN always exists
if not self.fs.can_handle_path(path):
continue # e.g. non-S3 URIs on EMR
if not self.fs.exists(path):
raise IOError(
'Input path %s does not exist!' % (path,))
def _intermediate_output_uri(self, step_num, local=False):
"""A URI for intermediate output for the given step number."""
join = os.path.join if local else posixpath.join
return join(
self._step_output_dir or self._default_step_output_dir(),
'%04d' % step_num)
def _default_step_output_dir(self):
"""Where to put output for steps other than the last one,
if not specified by the *output_dir* constructor keyword.
Usually you want this to be on HDFS (most efficient).
Define this in your runner subclass.
"""
raise NotImplementedError
def _step_input_uris(self, step_num):
"""A list of URIs to use as input for the given step. For all
except the first step, this list will have a single item (a
directory)."""
if step_num == 0:
return [self._upload_mgr.uri(path)
for path in self._get_input_paths()]
else:
return [self._intermediate_output_uri(step_num - 1)]
def _step_output_uri(self, step_num):
"""URI to use as output for the given step. This is either an
intermediate dir (see :py:meth:`intermediate_output_uri`) or
``self._output_dir`` for the final step."""
if step_num == len(self._get_steps()) - 1:
return self._output_dir
else:
return self._intermediate_output_uri(step_num)
def _interpolate_input_and_output(self, args, step_num):
"""Replace :py:data:`~mrjob.step.INPUT` and
:py:data:`~mrjob.step.OUTPUT` in arguments to a jar or Spark
step.
If there are multiple input paths (i.e. on the first step), they'll
be joined with a comma.
"""
def interpolate(arg):
if arg == mrjob.step.INPUT:
return ','.join(self._step_input_uris(step_num))
elif arg == mrjob.step.OUTPUT:
return self._step_output_uri(step_num)
else:
return arg
return [interpolate(arg) for arg in args]
def _create_mrjob_zip(self):
"""Make a zip of the mrjob library, without .pyc or .pyo files,
This will also set ``self._mrjob_zip_path`` and return it.
Typically called from
:py:meth:`_create_setup_wrapper_script`.
It's safe to call this method multiple times (we'll only create
the zip file once.)
"""
if not self._mrjob_zip_path:
# find mrjob library
import mrjob
if not os.path.basename(mrjob.__file__).startswith('__init__.'):
raise Exception(
"Bad path for mrjob library: %s; can't bootstrap mrjob",
mrjob.__file__)
mrjob_dir = os.path.dirname(mrjob.__file__) or '.'
zip_path = os.path.join(self._get_local_tmp_dir(), 'mrjob.zip')
def filter_path(path):
filename = os.path.basename(path)
return not(filename.lower().endswith('.pyc') or
filename.lower().endswith('.pyo') or
# filter out emacs backup files
filename.endswith('~') or
# filter out emacs lock files
filename.startswith('.#') or
# filter out MacFuse resource forks
filename.startswith('._'))
log.debug('archiving %s -> %s as %s' % (
mrjob_dir, zip_path, os.path.join('mrjob', '')))
zip_dir(mrjob_dir, zip_path, filter=filter_path, prefix='mrjob')
self._mrjob_zip_path = zip_path
return self._mrjob_zip_path
def _jobconf_for_step(self, step_num):
"""Get the jobconf dictionary, optionally including step-specific
jobconf info.
Also translate jobconfs to the current Hadoop version, if necessary.
"""
step = self._get_step(step_num)
# _sort_values_jobconf() isn't relevant to Spark,
# but it doesn't do any harm either
jobconf = combine_dicts(self._sort_values_jobconf(),
self._opts['jobconf'],
step.get('jobconf'))
# if user is using the wrong jobconfs, add in the correct ones
# and log a warning
hadoop_version = self.get_hadoop_version()
if hadoop_version:
jobconf = translate_jobconf_dict(jobconf, hadoop_version)
return jobconf
def _sort_values_jobconf(self):
"""Jobconf dictionary to enable sorting by value.
"""
if not self._sort_values:
return {}
# translate _SORT_VALUES_JOBCONF to the correct Hadoop version,
# without logging a warning
hadoop_version = self.get_hadoop_version()
jobconf = {}
for k, v in _SORT_VALUES_JOBCONF.items():
if hadoop_version:
jobconf[translate_jobconf(k, hadoop_version)] = v
else:
for j in translate_jobconf_for_all_versions(k):
jobconf[j] = v
return jobconf
def _sort_values_partitioner(self):
"""Partitioner to use with *sort_values* keyword to the constructor."""
if self._sort_values:
return _SORT_VALUES_PARTITIONER
else:
return None
def _parse_setup_and_py_files(self):
"""Parse the *setup* option with
:py:func:`mrjob.setup.parse_setup_cmd()`, and patch in *py_files*.
"""
setup = []
# py_files
for path in self._opts['py_files']:
# Spark (at least v1.3.1) doesn't work with # and --py-files,
# see #1375
if '#' in path:
raise ValueError("py_files cannot contain '#'")
path_dict = parse_legacy_hash_path('file', path)
setup.append(['export PYTHONPATH=', path_dict, ':$PYTHONPATH'])
# setup
for cmd in self._opts['setup']:
setup.append(parse_setup_cmd(cmd))
return setup
def _upload_args(self):
# just upload every file and archive in the working dir manager
return self._upload_args_helper('-files', None, '-archives', None)
def _upload_args_helper(
self, files_opt_str, files, archives_opt_str, archives):
args = []
file_hash_paths = list(self._arg_hash_paths('file', files))
if file_hash_paths:
args.append(files_opt_str)
args.append(','.join(file_hash_paths))
archive_hash_paths = list(self._arg_hash_paths('archive', archives))
if archive_hash_paths:
args.append(archives_opt_str)
args.append(','.join(archive_hash_paths))
return args
def _arg_hash_paths(self, type, named_paths=None):
"""Helper function for the *upload_args methods."""
if named_paths is None:
# just return everything managed by _working_dir_mgr
named_paths = sorted(
self._working_dir_mgr.name_to_path(type).items())
for name, path in named_paths:
if not name:
name = self._working_dir_mgr.name(type, path)
uri = self._upload_mgr.uri(path)
yield '%s#%s' % (uri, name)
def _fix_env(env):
"""Convert environment dictionary to strings (Python 2.7 on Windows
doesn't allow unicode)."""
def _to_str(s):
if isinstance(s, string_types) and not isinstance(s, str):
return s.encode('utf_8')
else:
return s
return dict((_to_str(k), _to_str(v)) for k, v in env.items())
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: Generate simulated reviews
"""
import os
from textgen.unsup_generation.phrase import load_list, caculate_word_idf, text2review, find_word_phrase, get_seg_pos
from textgen.unsup_generation.util import text2seg_pos, get_aspect_express, get_candidate_aspect, \
merge_aspect_express, fake_review_filter, generate_reviews, NSDict, PairPattSort
pwd_path = os.path.abspath(os.path.dirname(__file__))
default_stopwords_path = os.path.join(pwd_path, '../data/stopwords.txt')
default_pos_adj_word_path = os.path.join(pwd_path, '../data/HowNetPOSWord.txt')
class Generate:
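    """Mine aspect/opinion expressions from a review corpus and use them to generate simulated reviews."""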
def __init__(self, docs):
print('docs_text len:', len(docs))
        # Load stopwords
self.stopwords = set(load_list(default_stopwords_path))
        # Compute the idf of each word, excluding stopwords
self.word_idf, self.seg_pos_text = caculate_word_idf(docs, self.stopwords)
review_list, all_word = text2review(self.seg_pos_text)
phrase_list = find_word_phrase(all_word, review_list)
print('find new word:', phrase_list)
        # Load the positive sentiment lexicon
self.pos_adj_word = load_list(default_pos_adj_word_path)
def generate(self, doc, num=1000, is_uniq=True):
seg_pos_text = [get_seg_pos(l) for l in doc]
seg_list, pos_list, seg_review_list = text2seg_pos(seg_pos_text, pattern='[。!?,~]')
raw_aspect_list = get_candidate_aspect(seg_list, pos_list, self.pos_adj_word, self.stopwords, self.word_idf)
        # Build the candidate set
N = NSDict(seg_list, pos_list, raw_aspect_list)
ns_dict = N.build_nsdict()
        # Rank the candidate set
P = PairPattSort(ns_dict)
pair_score = P.sort_pair()
        # Keep the correct opinion-expression candidates
pair_useful = {}
baseline = 0.1 * len(pair_score)
for i, item in enumerate(pair_score):
if i <= baseline:
aspect, opinion = item[0].split('\t')
if aspect in pair_useful:
pair_useful[aspect].append(opinion)
else:
pair_useful[aspect] = [opinion]
        # Extract opinion expressions from the original reviews
aspect_express = get_aspect_express(seg_review_list, pair_useful)
        # Merge aspects by string matching
merged_aspect_express, opinion_set = merge_aspect_express(aspect_express, pair_useful)
        # Generate fake reviews
generated_raw_reviews = generate_reviews(merged_aspect_express, num=num)
results = fake_review_filter(generated_raw_reviews, opinion_set, is_uniq=is_uniq)
return results
if __name__ == '__main__':
sample1 = load_list(os.path.join(pwd_path, '../data/12617.txt'))
docs_text = [["挺好的,速度很快,也很实惠,不知效果如何",
"产品没得说,买了以后就降价,心情不美丽。",
"刚收到,包装很完整,不错",
"发货速度很快,物流也不错,同一时间买的两个东东,一个先到一个还在路上。这个水水很喜欢,不过盖子真的开了。盖不牢了现在。",
"包装的很好,是正品",
"被种草兰蔻粉水三百元一大瓶囤货,希望是正品好用,收到的时候用保鲜膜包裹得严严实实,只敢买考拉自营的护肤品",
],
['很温和,清洗的也很干净,不油腻,很不错,会考虑回购,第一次考拉买护肤品,满意',
'这款卸妆油我会无限回购的。即使我是油痘皮,也不会闷痘,同时在脸部按摩时,还能解决白头的脂肪粒的问题。用清水洗完脸后,非常的清爽。',
'自从用了fancl之后就不用其他卸妆了,卸的舒服又干净',
'买贵了,大润发才卖79。9。',
],
sample1
]
m = Generate(docs_text)
r = m.generate(sample1[:500])
print('size:', len(r))
for review in r:
print('\t' + review)
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import numpy as np
from scipy import ndimage
from abc import ABCMeta, abstractmethod
from PIL import Image, ImageEnhance, ImageFilter
class Augmentor(metaclass=ABCMeta):
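    """Base class for augmentors; inputs are (H, W, 6) tensors holding a pair of RGB images."""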
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.__dict__)
@abstractmethod
def __call__(self, *args, **kwargs):
        raise NotImplementedError()
def split_input_tensor(self, input_tensor):
"""
input_tensor: np.ndarray with shape (H, W, 6)
return: ndarray(H, W, 3), ndarray(H, W, 3)
"""
return input_tensor[..., :3], input_tensor[..., 3:]
class ColorConverter(Augmentor):
"""
Augmentors converting pixel color properties
"""
def __call__(self, image, *args, **kwargs):
image_a, image_b = self.split_input_tensor(image)
factor = np.random.uniform(self.min_value, self.max_value)
processed_tensor = np.concatenate([
self.process(image_a, factor),
self.process(image_b, factor)
], axis=2)
return dict({"image": processed_tensor}, **kwargs)
def process(self, *args, **kwargs):
        raise NotImplementedError()
class Brightness(ColorConverter):
"""
Adjusting image brightness.
reference:
https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html#PIL.ImageEnhance.PIL.ImageEnhance.Brightness
args: min_value, max_value:
An enhancement factor of 0.0 gives a black image.
A factor of 1.0 gives the original image.
"""
def __init__(self, min_value=0.75, max_value=1.25):
assert min_value >= 0 and max_value >= 0, "Negative value not allowed!"
self.min_value, self.max_value = min_value, max_value
def process(self, image, factor):
pil_image = Image.fromarray(image)
enhancer = ImageEnhance.Brightness(pil_image)
processed_image = enhancer.enhance(factor)
return np.array(processed_image)
class Color(ColorConverter):
"""
Adjusting image color.
reference:
https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html#PIL.ImageEnhance.PIL.ImageEnhance.Color
args: min_value, max_value
An enhancement factor of 0.0 gives a black and white image.
A factor of 1.0 gives the original image.
"""
def __init__(self, min_value=0.75, max_value=1.25):
assert min_value >= 0 and max_value >= 0, "Negative value not allowed!"
self.min_value, self.max_value = min_value, max_value
def process(self, image, factor):
pil_image = Image.fromarray(image)
enhancer = ImageEnhance.Color(pil_image)
processed_image = enhancer.enhance(factor)
return np.array(processed_image)
class Contrast(ColorConverter):
"""
Adjusting image contrast.
reference:
https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html#PIL.ImageEnhance.PIL.ImageEnhance.Contrast
args: min_value, max_value
An enhancement factor of 0.0 gives a solid grey image.
A factor of 1.0 gives the original image.
"""
def __init__(self, min_value=0.75, max_value=1.25):
assert min_value >= 0 and max_value >= 0, "Negative value not allowed!"
self.min_value, self.max_value = min_value, max_value
def process(self, image, factor):
pil_image = Image.fromarray(image)
enhancer = ImageEnhance.Contrast(pil_image)
processed_image = enhancer.enhance(factor)
return np.array(processed_image)
class Hue(ColorConverter):
"""
Adjusting image hue.
    args: min_value, max_value
        The hue shift is drawn uniformly from this range and added to the hue channel.
        A shift of 0.0 leaves the image unchanged.
"""
def __init__(self, min_value=-10.0, max_value=10.0):
assert min_value > -255 and max_value < 255, \
"Value range should be within (-255, 255)!"
self.min_value, self.max_value = min_value, max_value
def process(self, image, factor):
pil_image = Image.fromarray(image)
hsv_image = np.array(pil_image.convert("HSV"))
hsv_image[:, :, 0] = hsv_image[:, :, 0] + factor
processed_image = Image.fromarray(hsv_image, "HSV").convert("RGB")
return np.array(processed_image)
class Gamma(ColorConverter):
"""
    Gamma correction.
"""
def __init__(self, min_value=0.0, max_value=1.0):
assert min_value >= 0 and max_value >= 0, "Negative value not allowed!"
self.min_value, self.max_value = min_value, max_value
def process(self, image, gamma):
processed_image = (((image / 255.0) ** gamma) * 255.0).astype(np.uint8)
return processed_image
class GaussianBlur(ColorConverter):
"""
Gaussian blur filter.
reference:
https://pillow.readthedocs.io/en/stable/reference/ImageFilter.html#PIL.ImageFilter.GaussianBlur
args: min_value, max_value
        The reference implementation's default radius is 2.
"""
def __init__(self, min_value=0.0, max_value=1.0):
assert min_value >= 0 and max_value >= 0, "Negative value not allowed!"
self.min_value, self.max_value = min_value, max_value
def process(self, image, radius):
pil_image = Image.fromarray(image)
processed_image = pil_image.filter(ImageFilter.GaussianBlur(radius))
return np.array(processed_image)
class GaussianNoise(Augmentor):
"""
Additive Gaussian noise.
"""
def __init__(self, min_value=0.0, max_value=1.0):
assert min_value >= 0 and max_value >= 0, "Negative value not allowed!"
self.min_value, self.max_value = min_value, max_value
def __call__(self, image, label, **kwargs):
# print(image.shape)
noise_amp = np.random.uniform(self.min_value, self.max_value)
image_noise = noise_amp * np.random.randn(*image.shape)
processed_image = image + image_noise
processed_image[processed_image < 0] = 0
processed_image[processed_image > 255] = 255
processed_image = processed_image.astype(np.uint8)
return dict({
"image": processed_image, "label": label}, **kwargs)
class FlipTopBottom(Augmentor):
"""
Flip top bottom.
args: probability
Probability for flipping.
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, label, **kwargs):
if np.random.rand() < self.prob:
image = image[::-1, ...]
label = label[::-1, ...]
label[:, :, 1] *= -1.0
return dict({
"image": image, "label": label}, **kwargs)
class FlipLeftRight(Augmentor):
"""
Flip left right.
args: probability
Probability for flipping.
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, label, **kwargs):
if np.random.rand() < self.prob:
image = image[:, ::-1, :]
label = label[:, ::-1, :]
label[:, :, 0] *= -1.0
return dict({
"image": image, "label": label}, **kwargs)
class Identity(Augmentor):
"""
create the pair of images with no change
args: probability
Probability for applying this process.
"""
def __init__(self, prob=0.1):
self.prob = prob
def __call__(self, image, label, **kwargs):
if np.random.rand() < self.prob:
image[..., :3] = image[..., 3:]
label[:] = 0.0
return dict({
"image": image, "label": label}, **kwargs)
class Rotate(Augmentor):
"""
Rotating image
"""
def __init__(self, min_value=-15, max_value=15):
self.min_value, self.max_value = min_value, max_value
def __call__(self, image, label, **kwargs):
ang = np.random.uniform(self.min_value, self.max_value)
deg = ang * np.pi / 180
rot_mat = np.array([
[np.cos(deg), -np.sin(deg)],
[np.sin(deg), np.cos(deg)],
])
image_new = ndimage.rotate(image, ang, reshape=False, cval=0.0)
image_new = image_new.astype(np.uint8)
flow_new = np.array(label.dot(rot_mat.T))
flow_new = ndimage.rotate(flow_new, ang, reshape=False, cval=0.0)
return dict({
"image": image_new, "label": flow_new}, **kwargs)
class Scale(Augmentor):
"""
Scaling image
"""
def __init__(self, min_value=1.0, max_value=2.0):
assert min_value >= 1.0 or max_value >= 1.0, \
"scaling parameter should be greater than 1.0"
self.min_value, self.max_value = min_value, max_value
def random_crop(self, data, crop_size):
height, width, _ = data.shape
if height == crop_size[0] or width == crop_size[1]:
return data
top = np.random.randint(0, height - crop_size[0])
left = np.random.randint(0, width - crop_size[1])
bottom = top + crop_size[0]
right = left + crop_size[1]
return data[top:bottom, left:right, :]
def __call__(self, image, label, **kwargs):
image_size = image.shape[:2]
factor = np.random.uniform(self.min_value, self.max_value)
data = np.concatenate([image, label * factor], axis=2)
zoomed_data = ndimage.zoom(data, [factor, factor, 1], order=1)
data = self.random_crop(zoomed_data, crop_size=image_size)
image_new = data[..., :-2]
image_new[image_new < 0] = 0
image_new[image_new > 255] = 255
image_new = image_new.astype(np.uint8)
label_new = data[..., -2:]
return dict({"image": image_new, "label": label_new}, **kwargs)
class Translate(Augmentor):
"""
Shifting image
"""
def __init__(self, min_value=-0.2, max_value=0.2):
self.min_value, self.max_value = min_value, max_value
def __call__(self, image, label, **kwargs):
image_size = image.shape[:2]
dh = np.random.uniform(self.min_value, self.max_value)
dw = np.random.uniform(self.min_value, self.max_value)
shift = [int(image_size[0] * dh), int(image_size[1] * dw), 0]
shifted_image = ndimage.shift(image, shift, order=1, cval=0)
shifted_label = ndimage.shift(label, shift, order=1, cval=0)
return dict({"image": shifted_image, "label": shifted_label}, **kwargs)
|
import nltk
import pickle
import random
from nltk.classify import ClassifierI
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.corpus import movie_reviews
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.svm import NuSVC
from sklearn.svm import SVC
from statistics import mode
documents = [(list(movie_reviews.words(fieldid)), category)
for category in movie_reviews.categories()
for fieldid in movie_reviews.fileids(category)]
random.shuffle(documents)
words_list = []
for w in movie_reviews.words():
words_list.append(w.lower())
words_list = nltk.FreqDist(words_list)
word_feature = list(words_list.keys())[50:5000]
def featuresFind(document):
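    # Build a bag-of-words feature dict mapping each candidate word to whether it occurs in the document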
feature = {}
words = set(document)
for w in word_feature:
feature[w] = (w in words)
return feature
featureSet = [(featuresFind(rev), category) for (rev, category) in documents]
trainSet = featureSet[:1400]
testSet = featureSet[1400:]
# trainSet = featureSet[100:1900]
# testSet = featureSet[1900:]
list_of_classifier = [MultinomialNB, LogisticRegression,
SGDClassifier, SVC, LinearSVC, NuSVC]
listClassifier = []
for classifier in list_of_classifier:
classifier_name = SklearnClassifier(classifier())
listClassifier.append(classifier_name)
classifier_name.train(trainSet)
print('{} Accuracy : {}'.format(classifier.__name__,
nltk.classify.accuracy(classifier_name, testSet)*100))
class ClassifierVote(ClassifierI):
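    """Simple voting ensemble: mode() returns the majority label, confidence() the share of agreeing classifiers."""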
def __init__(self, classifiers):
self._classifiers = classifiers
def mode(self, features):
votes = []
for classifier in self._classifiers:
vote = classifier.classify(features)
votes.append(vote)
return mode(votes)
def confidence(self, features):
votes = []
for classifier in self._classifiers:
vote = classifier.classify(features)
votes.append(vote)
vote_choice = votes.count(mode(votes))
confi = vote_choice / len(votes)
return confi
votes_classified = ClassifierVote(listClassifier)
for i in range(5):
print('Confidence : {}'.format(votes_classified.confidence(testSet[i][0])*100))
print('Mode : {}'.format(votes_classified.mode(testSet[i][0])))
|
"""Module allowing for ``python -m lexos ...``."""
from lexos import application
application.run()
|
"""Tools for getting spectra for lya fitting.
Includes choosing a data file for each star, reading the files, and
processing the spectral data (from either IUE, STIS, ...) into a format
that can be used directly for the fitting.
The variable target_use_which_spectrum indicates which data to use for
each star. It can be customized by editing this file. Running this
module directly will print out the default value for this dictionary.
"""
from astropy.table import Table
from astropy.io import fits
import numpy as np
from pathlib import Path
from warnings import warn
from scipy.interpolate import interp1d
import collections
# can be manually tweaked. If the value is a list or contains *, the
# spectra will be coadded
target_use_which_spectrum = {
"HD097471": "data/HD097471/mastDownload/IUE/swp19375/swp19375mxlo_vo.fits",
"HD037525": "data/HD037525/mastDownload/IUE/swp27579/swp27579.mxhi.gz",
"HD093827": "data/HD093827/mastDownload/IUE/swp50536/swp50536.mxhi.gz",
# "HD093827": "data/HD093827/*mxlo_vo.fits",
"HD051013": "data/HD051013/mastDownload/IUE/swp22860/swp22860.mxhi.gz",
"HD096675": "data/HD096675/mastDownload/IUE/swp41717/swp41717.mxhi.gz",
"HD023060": "data/HD023060/mastDownload/IUE/swp11151/swp11151mxlo_vo.fits",
"HD099872": "data/HD099872/mastDownload/HST/**/*_x1d.fits",
# "HD152248": "data/HD152248/mastDownload/IUE/swp54576/swp54576.mxhi.gz",
"HD152248": "data/HD152248/**/*.mxhi.gz",
"HD209339": "data/HD209339/mastDownload/HST/**/*_x1d.fits",
# "HD197770": "data/HD197770/mastDownload/HST/oedl04010/oedl04010_x1d.fits",
"HD197770": "data/HD197770/**/*.mxhi.gz",
"HD037332": "data/HD037332/mastDownload/IUE/swp32289/swp32289.mxhi.gz",
"HD093028": "data/HD093028/mastDownload/IUE/swp05521/swp05521.mxhi.gz",
# "HD062542": "data/HD062542/mastDownload/HST/obik01020/obik01020_x1d.fits", # wavelength range
# "HD062542": "data/HD062542/*.mxhi.gz", # way too noisy
"HD062542": "data/HD062542/**/*mxlo_vo.fits",
# "HD190603": "data/HD190603/*.mxhi.gz",
"HD190603": "data/HD190603/**/*mxlo_vo.fits",
# "HD046202": "data/HD046202/mastDownload/IUE/swp08845/swp08845.mxhi.gz",
# "HD046202": "data/HD046202/mastDownload/HST/ocb6e0030/ocb6e0030_x1d.fits",
# "HD046202": "data/HD046202/mastDownload/HST/ocb6e1030/ocb6e1030_x1d.fits",
"HD046202": "data/HD046202/mastDownload/HST/**/*_x1d.fits",
# "HD047129": "data/HD047129/mastDownload/IUE/swp07077/swp07077.mxhi.gz",
"HD047129": "data/HD047129/**/*.mxhi.gz",
"HD235874": "data/HD235874/mastDownload/IUE/swp34158/swp34158mxlo_vo.fits",
"HD216898": "data/HD216898/swp43934.mxhi.gz",
# "HD216898": "data/HD216898/mastDownload/IUE/swp17175/swp17175mxlo_vo.fits",
"HD326329": "data/HD326329/mastDownload/IUE/swp48698/swp48698.mxhi.gz",
"HD179406": [
"data/HD179406/mastDownload/IUE/swp08974/swp08974.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp08976/swp08976.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp13865/swp13865.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp36939/swp36939.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp36940/swp36940.mxhi.gz",
],
"BD+52d3210": "data/BD+52d3210/mastDownload/IUE/swp34153/swp34153mxlo_vo.fits",
"BD+56d524": "data/BD+56d524/mastDownload/IUE/swp20330/swp20330mxlo_vo.fits",
# data for comparison to existing HI results
"HD094493": "data/HD094493/mastDownload/HST/o54306010/o54306010_x1d.fits",
"HD045314": "data/HD045314/mastDownload/IUE/**/*mxhi.gz"
}
# namedtuple defines a simple class
Spectrum = collections.namedtuple(
"Spectrum", ["wavs", "flux", "errs", "net", "exptime"]
)
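# A small illustrative sketch (hypothetical values) of how Spectrum instances
# are built and accessed throughout this module:
#   s = Spectrum(wavs=np.array([1214.0, 1216.0]),
#                flux=np.array([1.0e-12, 2.0e-12]),
#                errs=np.array([1.0e-13, 1.5e-13]),
#                net=None, exptime=None)
#   s.wavs, s.flux  # fields are accessed by name rather than by index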
def processed(target, wmin=0, wmax=1400, disp=0.25):
"""Get spectrum data ready for fitting Lya for the given target.
Tweak the variable get_spectrum.target_use_which_spectrum to choose
the right data. Depending on whether a IUE or STIS spectrum was
chosen, different steps will be taken. The end result is the
spectral data in a common format, processed with different steps
depending on the source of the data.
Returns
-------
    wavs, flux, filename : wavelengths (angstrom), fluxes (erg s-1 cm-2 angstrom-1),
        and the name(s) of the file(s) the data came from
"""
# choose data
filename = target_use_which_spectrum[target]
print("Getting data from ", filename)
spectrum, rebin = auto_wavs_flux_errs(filename)
if rebin:
binnedwavs, binnedflux = rebin_spectrum_around_lya(spectrum, wmin, wmax, disp)
else:
wavs, flux = spectrum.wavs, spectrum.flux
use = np.logical_and(wmin < wavs, wavs < wmax)
binnedwavs, binnedflux = wavs[use], flux[use]
# remove nans (these are very annoying when they propagate, e.g.
# max([array with nan]) = nan).
safe = np.isfinite(binnedflux)
safewavs = binnedwavs[safe]
safeflux = binnedflux[safe]
return safewavs, safeflux, filename
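# Hedged usage sketch (assumes the corresponding data file listed in
# target_use_which_spectrum is present on disk):
#   wavs, flux, fname = processed("HD097471", wmin=1150, wmax=1300, disp=0.25)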
def auto_wavs_flux_errs(filename):
"""Load spectrum or multiple spectra based on file name."""
    # Determine whether multiple files were provided; a glob pattern counts as multiple files too.
if isinstance(filename, list):
to_be_coadded = filename
elif isinstance(filename, str):
if "*" in filename:
to_be_coadded = [str(p) for p in Path(".").glob(filename)]
elif "x1d" in filename:
# a single x1d file can contain multiple extensions, which
# need to be coadded
to_be_coadded = [filename]
else:
to_be_coadded = None
else:
warn("filename should be str or list!")
raise
if to_be_coadded is None:
if "x1d" in filename:
spectrum = merged_stis_data(filename)
rebin = True
elif "mxhi" in filename:
spectrum = merged_iue_h_data(filename)
rebin = True
elif "mxlo" in filename:
spectrum = iue_l_data(filename)
rebin = False
else:
warn("File {} not supported yet, exiting".format(filename))
exit()
else:
if "x1d" in to_be_coadded[0]:
spectrum = coadd_hst_stis(to_be_coadded)
rebin = True
elif "mxhi" in to_be_coadded[0]:
spectrum = coadd_iue_h(to_be_coadded)
rebin = True
elif "mxlo" in to_be_coadded[0]:
spectrum = coadd_iue_l(to_be_coadded)
rebin = False
return spectrum, rebin
def merged_stis_data(filename, extension=1):
"""Get spectrum data from all STIS spectral orders.
If only filename is given, use SCI extension.
Returns
-------
wavs: numpy array, all wavelengths, sorted
flux: all fluxes at these wavelengths
errs: all errors at these wavelengths
"""
with fits.open(filename) as f:
t = f[extension].data
exptime = get_exptime(f[extension].header)
output_columns = ["WAVELENGTH", "FLUX", "ERROR", "NET"]
fields = [np.concatenate(t[c]) for c in output_columns]
# clean up by dq
dq = np.concatenate(t["DQ"])
good = dq == 0
print(f"STIS: {good.sum()} out of {len(good)} wavelength points are good")
fields = [c[good] for c in fields]
# sort by wavelength
idxs = np.argsort(fields[0])
fields = [c[idxs] for c in fields]
# add exptime and create Spectrum (namedtuple) object (* unpacks,
# should be in right order)
fields.append(exptime)
return Spectrum(*fields)
def merged_iue_h_data(filename):
"""
    Get Spectrum info over all orders of high-resolution IUE data.
Returns
-------
Spectrum
"""
t = Table.read(filename)
def iue_wavs(i):
return t[i]["WAVELENGTH"] + t[i]["DELTAW"] * np.arange(t[i]["NPOINTS"])
def pixrange(i):
return slice(t[i]["STARTPIX"], t[i]["STARTPIX"] + t[i]["NPOINTS"])
def all_of_column(colname):
return np.concatenate([t[i][colname][pixrange(i)] for i in range(len(t))])
allwavs = np.concatenate([iue_wavs(i) for i in range(len(t))])
colnames = ["WAVELENGTH", "ABS_CAL", "NOISE", "NET"]
column_values = [allwavs]
for colname in colnames[1:]:
column_values.append(all_of_column(colname))
# clean up using DQ
dq = all_of_column("QUALITY")
good = dq == 0
print(f"IUE: {good.sum()} out of {len(good)} wavelength points are good")
    column_values = [c[good] for c in column_values]
# sort by wavelength
idxs = np.argsort(column_values[0])
column_values = [c[idxs] for c in column_values]
# add exptime and create Spectrum
exptime = get_exptime(fits.getheader(filename, ext=0))
fields = column_values + [exptime]
return Spectrum(*fields)
def iue_l_data(filename):
t = Table.read(filename)
wavs = t["WAVE"][0]
flux = t["FLUX"][0]
sigma = t["SIGMA"][0]
# net is not available
net = None
# exptime is not used (for now)
exptime = None
return Spectrum(wavs, flux, sigma, net, exptime)
def coadd_iue_h(filenames):
print(f"Coadding {len(filenames)} IUE H exposures")
return coadd_general([merged_iue_h_data(fn) for fn in filenames])
def coadd_iue_l(filenames):
print(f"Coadding {len(filenames)} IUE L exposures")
spectrums = [iue_l_data(fn) for fn in filenames]
    if not all(np.array_equal(spectrums[0].wavs, s.wavs) for s in spectrums):
        raise NotImplementedError(
            "Not all wavelength grids are equal in IUE L data; coadding onto a common grid is not implemented yet."
        )
# Assume that the wavs are always the same. If not, the above error
# will trigger, and I should reconsider.
numwavs = len(spectrums[0].wavs)
flux_sum = np.zeros(numwavs)
weight_sum = np.zeros(numwavs)
for s in spectrums:
good = np.isfinite(s.flux) & (s.errs > 0)
weight = 1 / s.errs ** 2
flux_sum[good] += s.flux[good] * weight[good]
weight_sum[good] += weight[good]
# simply the 1/sigma2 weighting rule
new_flux = flux_sum / weight_sum
new_errs = np.sqrt(1 / weight_sum)
return Spectrum(spectrums[0].wavs, new_flux, new_errs, None, None)
def coadd_hst_stis(filenames):
# get all SCI exposures
spectrums = []
    # each file is opened with a context manager, so its handle is closed automatically
for fn in filenames:
with fits.open(fn) as hdus:
for extension in range(1, len(hdus)):
spectrums.append(merged_stis_data(fn, extension))
print(f"Coadding {len(spectrums)} STIS exposures from {len(filenames)} files")
return coadd_general(spectrums)
def coadd_general(spectrums):
"""General function for coadding spectra.
spectrums : list of Spectrum objects
Returns
-------
spectrum : Spectrum object representing the coadded data
"""
# get all the per-wavelength data
all_wavs = [s.wavs for s in spectrums]
# determine new wavelength grid, using max of median of wavelength
# increment as step size
maxwav = np.amax(np.concatenate(all_wavs))
minwav = np.amin(np.concatenate(all_wavs))
disp = np.amax([np.median(np.diff(w)) for w in all_wavs])
newwavs = np.arange(minwav, maxwav, disp)
# instead of binning, we're just going to do nearest neighbour on a
# slightly coarser wavelength grid. It worked for Julia, so...
flux_sum = np.zeros(len(newwavs))
weight_sum = np.zeros(len(newwavs))
variance_sum = np.zeros(len(newwavs))
net_sum = np.zeros(len(newwavs))
total_exptime = np.zeros(len(newwavs))
for s in spectrums:
# nearest neighbour interpolation of all relevant quantities
def do_interp1d(quantity):
return interp1d(
s.wavs, quantity, kind="nearest", fill_value=np.nan, bounds_error=False,
)(newwavs)
fi = do_interp1d(s.flux)
ei = do_interp1d(s.errs)
ni = do_interp1d(s.net)
exptime = s.exptime
# weights scale with ni / fi = sensitivity
good_fi_ni = (fi != 0) & np.isfinite(fi) & (ni != 0) & np.isfinite(ni)
wi = np.where(good_fi_ni, ni / fi, 0) * exptime
good_wi = wi > 0
# total_counts = flux * sensitivity * exptime
# --> flux = total_counts / (sensitivity * exptime)
#
# V(flux) = V(total_counts) / (sensitivity * exptime)**2
# = total_counts / (sensitivity * exptime)**2 (poisson)
# = flux * sensitivity * exptime / (sensitivity * exptime)**2
# = flux / (sensitivity * exptime)
# sens = counts per flux unit
weight_sum[good_wi] += wi[good_wi]
flux_sum[good_wi] += wi[good_wi] * fi[good_wi]
variance_sum[good_wi] += np.square(ei[good_wi] * wi[good_wi])
net_sum[good_wi] += ni[good_wi] * exptime
total_exptime[good_wi] += exptime
flux_result = flux_sum / weight_sum
errs_result = np.sqrt(variance_sum) / weight_sum
net_result = net_sum / total_exptime
return Spectrum(newwavs, flux_result, errs_result, net_result, total_exptime)
def rebin_spectrum_around_lya(spectrum, wmin=0, wmax=1400, disp=0.25):
"""Rebin spectrum to for lya fitting, and reject certain points.
A rebinning of the spectrum to make it more useful for lya fitting.
Every new point is the weighted average of all data within the range
of a bin. The weights are flux / net * exptime if those are
available. If not 1 / errs**2 is used. The bins can be specified by
choosing a minimum, maximum wavelength and a resolution (in
Angstrom). Additionally, only the points that satisfy some basic
data rejection criteria are used.
Returns
-------
newwavs: average wavelength in each bin
newflux: average flux in each bin
"""
wavs = spectrum.wavs
flux = spectrum.flux
wavmin = max(wmin, np.amin(wavs))
wavmax = min(wmax, np.amax(wavs))
wavbins = np.arange(wavmin, wavmax, disp)
if spectrum.net is not None and spectrum.exptime is not None:
weights = spectrum.net / flux * spectrum.exptime
else:
weights = 1 / spectrum.errs ** 2
# np.digitize returns list of indices. b = 1 means that the data point
# is between wav[0] (first) and wav[1]. b = n-1 means between wav[n-2]
# and wav[n-1] (last). b = 0 or n mean out of range.
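    # For example (hypothetical values):
    #   np.digitize([0.5, 1.2, 3.4, 9.0], [1.0, 2.0, 3.0, 4.0]) -> array([0, 1, 3, 4])
    # i.e. 0.5 is below the grid (0), 1.2 falls in the first bin (1), 3.4 in the
    # last bin (3), and 9.0 is out of range on the high side (4).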
bs = np.digitize(wavs, wavbins)
newwavs = np.zeros(len(wavbins) - 1)
newflux = np.zeros(len(wavbins) - 1)
for i in range(0, len(wavbins) - 1):
in_bin = bs == i + 1 # b runs from 1 to n-1
use = np.logical_and.reduce(
[in_bin, np.isfinite(flux), weights > 0, np.isfinite(weights)]
)
# if a bin is empty or something else is wrong, the nans will be
# filtered out later
if not use.any():
newwavs[i] = 0
newflux[i] = np.nan
continue
newwavs[i] = np.average(wavs[use], weights=weights[use])
newflux[i] = np.average(flux[use], weights=weights[use])
return newwavs, newflux
def get_exptime(header):
"""Tries a couple of keywords to find the exposure time in a FITS header"""
for exptime_key in ("EXPTIME", "LEXPTIME", "SEXPTIME"):
if exptime_key in header:
exptime = float(header[exptime_key])
return exptime
# Some code to generate the above dict from scratch. Manual tweaking can
# occur after.
if __name__ == "__main__":
gen_dict = {}
here = Path(".")
for d in list(here.glob("./data/HD*")) + list(here.glob("./data/BD*")):
has_iue_h = False
has_iue_l = False
has_hst_stis = False
# has_hst_cos = False
# lower in this list of ifs is higher priority
target = Path(d).name
# def set_if_exists(glob_pattern):
# files = d.glob(glob_pattern)
# if len(files) > 0:
# spectrum_file = files[0]
iue_l_files = list(d.glob("*mxlo_vo.fits"))
if len(iue_l_files) > 0:
spectrum_file = str(iue_l_files[0])
iue_h_files = list(d.glob("*mxhi.gz"))
if len(iue_h_files) > 0:
spectrum_file = str(iue_h_files[0])
hst_stis_files = list(d.glob("**/*x1d.fits"))
if len(hst_stis_files) > 0:
spectrum_file = str(hst_stis_files[0])
gen_dict[target] = spectrum_file
print(gen_dict)
|
# %% [markdown]
# # Models and Ensembling Methods
# %% [markdown]
# ## Import dependencies
import numpy
from gensim.models import word2vec
from gensim.models import KeyedVectors
import pandas
from nltk import WordPunctTokenizer
from sklearn.preprocessing import label_binarize
import sqlite3
from sklearn.multiclass import OneVsRestClassifier
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn import svm
from itertools import cycle
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from sklearn.metrics import multilabel_confusion_matrix, confusion_matrix
from sklearn.metrics import make_scorer
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn import tree
from sklearn.model_selection import GridSearchCV
from mlxtend.plotting import plot_learning_curves
import lime
import lime.lime_tabular
# %% [markdown]
# ## Define Constants
W2V_FEATURE_SIZE = 300
N_CLASSES = 4
RANDOM_STATE = 123
N_FOLDS = 5
# %% [markdown]
# ## Read in the data
# %% [markdown]
# ### Load raw train and test data
# %% [markdown]
# #### Load in the data from the database
# %%
dbconn = sqlite3.connect('./data/cleanedtraintest_v2.db')
train_data_df = pandas.read_sql_query(
'SELECT category, content_cleaned FROM train_data', dbconn)
test_data_df = pandas.read_sql_query(
'SELECT category, content_cleaned FROM test_data', dbconn)
dbconn.commit()
dbconn.close()
# %% [markdown]
# #### Check if the data was loaded correctly
# %%
train_data_df
# %%
test_data_df
# %% [markdown]
# #### Train & Test data where x is the predictor features, y is the predicted feature
x_train = train_data_df.content_cleaned
y_train = label_binarize(train_data_df.category, classes=range(1, N_CLASSES + 1))
x_test = test_data_df.content_cleaned
y_test = label_binarize(test_data_df.category, classes=range(1, N_CLASSES + 1))
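# %% [markdown]
# A hedged aside (hypothetical labels) on what label_binarize produces here:
# each category in 1..N_CLASSES becomes a one-hot row, which is the shape the
# OneVsRest classifiers below expect.
# %%
# label_binarize([1, 3], classes=range(1, N_CLASSES + 1))
# # -> array([[1, 0, 0, 0],
# #           [0, 0, 1, 0]])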
# %% [markdown]
# ### Load word2vec data
# %% [markdown]
# #### Load word2vec feature arrays from .npz files
# load dict of arrays
w2v_train_features_array_dict = numpy.load(
'./data/word2vec-train-features-120000-min5dim300.npz')
w2v_test_features_array_dict = numpy.load(
'./data/word2vec-test-features-120000-min5dim300.npz')
# extract the first array from train
data = w2v_train_features_array_dict['arr_0']
# print the array
print(data)
# extract the first array from test
data = w2v_test_features_array_dict['arr_0']
# print the array
print(data)
# %% [markdown]
# #### Load word2vec model trained key vectors
w2v_model_train = KeyedVectors.load(
'./data/custom-trained-word2vec-120000-min5dim300.kv')
# %% [markdown]
# #### Get the word2vec data back into usable form
wpt = WordPunctTokenizer()
tokenized_corpus_train = [wpt.tokenize(document) for document in x_train]
tokenized_corpus_test = [wpt.tokenize(document) for document in x_test]
# %%
def average_word_vectors(words, model, vocabulary, num_features):
feature_vector = numpy.zeros((num_features,), dtype="float32")
nwords = 0.
for word in words:
if word in vocabulary:
nwords = nwords + 1.
feature_vector = numpy.add(feature_vector, model[word])
if nwords:
feature_vector = numpy.divide(feature_vector, nwords)
return feature_vector
def averaged_word_vectorizer(corpus, model, num_features):
vocabulary = set(model.wv.index2word)
features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
for tokenized_sentence in corpus]
return numpy.array(features)
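# %% [markdown]
# A minimal sketch (hypothetical tokens, which may or may not be in the trained
# vocabulary) of how the two helpers above turn a tokenized document into a
# single averaged word2vec feature vector:
# %%
# demo_corpus = [['market', 'stocks', 'rise']]
# demo_features = averaged_word_vectorizer(corpus=demo_corpus,
#                                          model=w2v_model_train,
#                                          num_features=W2V_FEATURE_SIZE)
# demo_features.shape  # -> (1, W2V_FEATURE_SIZE)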
# %% [markdown]
# #### Obtain document level embeddings
# %%
w2v_feature_array_train = averaged_word_vectorizer(corpus=tokenized_corpus_train,
model=w2v_model_train, num_features=W2V_FEATURE_SIZE)
w2v_feature_array_test = averaged_word_vectorizer(corpus=tokenized_corpus_test,
model=w2v_model_train, num_features=W2V_FEATURE_SIZE)
x_train_w2v = pandas.DataFrame(w2v_feature_array_train)
x_test_w2v = pandas.DataFrame(w2v_feature_array_test)
# %% [markdown]
# #### Sample down for speed, for now.
x_train_w2v = x_train_w2v.sample(
n = 3000, replace = False, random_state = RANDOM_STATE
)
y_train = train_data_df.category.sample(
n = 3000, replace = False, random_state = RANDOM_STATE
)
y_train = label_binarize(y_train, classes=range(1, N_CLASSES + 1))
# %% [markdown]
# #### Delete variables we don't need anymore to save memory
# del(w2v_feature_array_test)
# del(w2v_feature_array_train)
# del(w2v_test_features_array_dict)
# del(w2v_train_features_array_dict)
# del(tokenized_corpus_test)
# del(tokenized_corpus_train)
# del(wpt)
# del(train_data_df)
# del(test_data_df)
# del(x_train)
# del(x_test)
# del(data)
# %% [markdown]
# ## Build Models
# %% [markdown]
# ### SVM Model Building Function
def run_svm(x_train, y_train):
classifier = OneVsRestClassifier(svm.LinearSVC(random_state=RANDOM_STATE))
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Logistic Regression Model Building Function
def run_logreg(x_train, y_train):
classifier = OneVsRestClassifier(LogisticRegression(random_state=RANDOM_STATE))
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Naive Bayes Function
def run_nb(x_train, y_train):
classifier = OneVsRestClassifier(GaussianNB())
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Decision Trees Function
def run_dectree(x_train, y_train):
classifier = OneVsRestClassifier(tree.DecisionTreeClassifier())
classifier.fit(x_train, y_train)
return classifier
# %% [markdown]
# ### Functions to calculate scores and to plot them
# Calculate, then plot the Precision, Recall, Average Precision, F1
def prf1_calc(classifier, algo_name, n_classes, x_test, y_test):
# Get the decision function from the classifier
if algo_name == 'SVM':
y_score = classifier.decision_function(x_test)
else:
y_score = classifier.predict_proba(x_test)
y_pred = classifier.predict(x_test)
# The average precision score in multi-label settings
# For each class
precision = dict()
recall = dict()
average_f1 = dict()
average_precision = dict()
mcc = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
average_f1[i] = f1_score(y_test[:, i], y_pred[:, i])
mcc[i] = matthews_corrcoef(y_test[:, i], y_pred[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
average_f1['micro'] = f1_score(y_test, y_pred, average='micro')
    mcc['micro'] = sum(mcc.values()) / n_classes  # average of the per-class MCCs
# Plot the data
prf1_plot(precision, recall, average_precision, algo_name, n_classes)
# Return all metrics
results = pandas.DataFrame()
for k in average_precision.keys():
results.at[algo_name, f'P-R {k}'] = numpy.round(average_precision[k], 3)
results.at[algo_name, f'F1 {k}'] = numpy.round(average_f1[k], 3)
results.at[algo_name, f'MCC {k}'] = numpy.round(mcc[k], 3)
return results
# Function to Plot Precision, Recall, F1
def prf1_plot(precision, recall, average_precision, algo_name, n_classes):
print(algo_name)
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
.format(average_precision["micro"]))
# Plot the micro-averaged Precision-Recall curve
plt.figure()
plt.step(recall['micro'], precision['micro'], where='post')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(
'Average precision score, micro-averaged over all classes: AP={0:0.2f}'
.format(average_precision["micro"]))
# Plot Precision-Recall curve for each class and iso-f1 curves
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
f_scores = numpy.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = numpy.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
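        # (rearranging F1 = 2*P*R / (P + R) at fixed F1 gives P = F1*R / (2*R - F1),
        #  which is the iso-F1 curve plotted here with x = recall and y = precision)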
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.5), prop=dict(size=14))
plt.show()
# %% [markdown]
# ## Run the Base Models
# %%
# Run SVM Model
svm_model = run_svm(x_train_w2v, y_train)
# %%
# Run Logistic Regression Model
logreg_model = run_logreg(x_train_w2v, y_train)
# %%
# Run Naive Bayes Classifier
nb_model = run_nb(x_train_w2v, y_train)
# %%
# Run Decision Trees Classifier
dectree_model = run_dectree(x_train_w2v, y_train)
# %% [markdown]
# ## Get the scores
# %%
# Initialize the dataframe to keep track of the scores
scores = pandas.DataFrame()
# %%
# Precision, Recall, Avg. Precision for SVM
scores = scores.append(prf1_calc(svm_model, 'SVM', N_CLASSES, x_test_w2v, y_test))
# %%
# Precision, Recall, Avg. Precision for LOG REG
scores = scores.append(prf1_calc(logreg_model, 'LOGREG', N_CLASSES, x_test_w2v, y_test))
# %%
# Precision, Recall, Avg. Precision for Naive Bayes
scores = scores.append(prf1_calc(nb_model, 'NB', N_CLASSES, x_test_w2v, y_test))
# %%
# Precision, Recall, Avg. Precision for Decision Trees
scores = scores.append(prf1_calc(dectree_model, 'DT', N_CLASSES, x_test_w2v, y_test))
# %% [markdown]
# ## Look at Cross-Validation
# %% Create model list to iterate through for cross validation
gnb = OneVsRestClassifier(GaussianNB())
sv = OneVsRestClassifier(svm.LinearSVC(random_state=RANDOM_STATE))
lreg = OneVsRestClassifier(LogisticRegression(random_state=RANDOM_STATE))
dtree = OneVsRestClassifier(tree.DecisionTreeClassifier())
model_list = [gnb, sv, lreg, dtree]
model_namelist = ['Gaussian Naive Bayes',
'SVM/Linear SVC',
'Logistic Regression',
'Decision Tree']
# %% Make scoring metrics to pass cv function through
scoring = {'precision': make_scorer(precision_score, average='micro'),
'recall': make_scorer(recall_score, average='micro'),
'f1': make_scorer(f1_score, average='micro'),
'roc_auc': make_scorer(roc_auc_score, average='micro'),
# 'mcc': make_scorer(matthews_corrcoef) <- cannot support multi-label
}
cv_result_entries = []
i = 0
# %% Loop cross validation through various models and generate results
for mod in model_list:
metrics = cross_validate(
mod,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
cv_result_entries.append((model_namelist[i], fold_index, key, score))
i += 1
# %%
#cv_result_entries = pandas.read_csv('./data/cv-results.csv')
cv_results_df = pandas.DataFrame(cv_result_entries)
#cv_results_df.drop('Unnamed: 0', axis=1, inplace=True)
cv_results_df.columns = ['algo', 'cv fold', 'metric', 'value']
#test_df = pandas.DataFrame((cv_results_df[cv_results_df.metric.eq('fit_time')]))
# %% Plot cv results
for metric_name, metric in zip(['fit_time',
'test_precision',
'test_recall',
'test_f1',
'test_roc_auc'],
['Fit Time',
'Precision',
'Recall',
'F1 Score',
'ROC AUC']):
sns.boxplot(x='algo', y='value', #hue='algo',
data=cv_results_df[cv_results_df.metric.eq(f'{metric_name}')])
sns.stripplot(x='algo', y = 'value',
data = cv_results_df[cv_results_df.metric.eq(f'{metric_name}')],
size = 5, linewidth = 1)
plt.title(f'{metric} Algo Comparison', fontsize=12)
plt.xlabel('Algorithm', fontsize=12)
plt.ylabel(f'{metric}', fontsize=12)
    plt.xticks(rotation=45)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# %% Misclassification Errors
i=0
for model in model_list:
plt.figure()
plot_learning_curves(x_train_w2v, y_train, x_test_w2v, y_test, model)
plt.title('Learning Curve for ' + model_namelist[i], fontsize=14)
plt.xlabel('Training Set Size (%)', fontsize=12)
plt.ylabel('Misclassification Error', fontsize=12)
plt.show()
i += 1
# %% Get predictions
y_test_pred = []
for model in model_list:
y_test_pred.append(model.predict(x_test_w2v))
# %% Confusion Matrix
CLASSES = ['World', 'Sports', 'Business', 'Sci/Tech']
i=0
for _ in model_list:
cm = confusion_matrix(numpy.argmax(y_test, axis=1),
numpy.argmax(y_test_pred[i], axis=1))
cm_df = pandas.DataFrame(cm, index = CLASSES, columns = CLASSES)
cm_df.index.name = 'Actual'
cm_df.columns.name = 'Predicted'
plt.title('Confusion Matrix for ' + model_namelist[i], fontsize=14)
sns.heatmap(cm_df, annot=True, fmt='.6g', annot_kws={"size": 10}, cmap='Reds')
plt.show()
i += 1
# %% HYPER PARAMETER TUNING BY HYPEROPT (not working)
'''from hyperopt import STATUS_OK
N_FOLDS = 5
#%%
#Objective Function
def objective(params, n_folds = N_FOLDS):
cv_results = cross_validate(OneVsRestClassifier(GaussianNB()),
x_train_w2v,
y_train,
cv = n_folds,
fit_params= params,
scoring = {'f1': make_scorer(f1_score, average='micro')},
return_train_score=False,
n_jobs=-1
)
# Extract the best score
best_score = max(cv_results['test_f1'])
# Loss must be minimized
loss = 1 - best_score
# Dictionary with information for evaluation
return {'loss': loss, 'params': params, 'status': STATUS_OK}
# %%
#Domain Space
from hyperopt import hp
space = {'estimator__var_smoothing': hp.uniform('estimator__var_smoothing',
1.e-09, 1.e+00)}
#%%
# Optimization Algorithm
from hyperopt import tpe
tpe_algo = tpe.suggest
#%%
# Results History
from hyperopt import Trials
bayes_trials = Trials()
#%%
# Run the optimization
from hyperopt import fmin
from hyperopt import rand
MAX_EVALS = 500
params = space
# Optimize
best = fmin(fn = objective, space = space, algo = tpe.suggest,
max_evals = 100, trials = bayes_trials)
print(best)'''
# %% [markdown]
# ## Hyper-parameter tuning with exhaustive Grid Search
# ### Tune hyperparameters for Gaussian Naive-Bayes
params_gnb = {'estimator__var_smoothing': [1.e-09, 1.e-08, 1.e-07, 1.e-06, 1.e-05,
1.e-04, 1.e-03, 1.e-02, 1.e-01, 1.e+00]
}
clf = GridSearchCV(estimator=gnb,
param_grid=params_gnb,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=True
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best Score: ', clf_res.best_score_)
print('Best Params: ', clf_res.best_params_)
# %%
# ### Tune hyperparameters for Logistic Regression
params_lreg = {
"estimator__penalty": ['l1', 'l2'],
"estimator__C": [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000],
#"estimator__class_weight":[{1:0.5, 0:0.5}, {1:0.4, 0:0.6},
# {1:0.6, 0:0.4}, {1:0.7, 0:0.3}],
"estimator__solver": ["newton-cg", "sag", "saga", "lbfgs"]
}
clf = GridSearchCV(estimator=lreg,
param_grid=params_lreg,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=True
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best score:', clf_res.best_score_)
print('Best Params:', clf_res.best_params_)
# %%
# ### Tune hyperparameters for SVM (Linear SVC)
params_sv = {
"estimator__penalty":['l1', 'l2'],
"estimator__tol": [1.e-08, 1.e-07, 1.e-06, 1.e-05,
1.e-04, 1.e-03, 1.e-02, 1.e-01, 1.e+00],
"estimator__loss":['hinge','squared_hinge'],
"estimator__C": [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]
#"estimator__class_weight":['None',{1:0.5, 0:0.5},
# {1:0.4, 0:0.6}, {1:0.6, 0:0.4}, {1:0.7, 0:0.3}],
}
clf = GridSearchCV(estimator=sv,
param_grid=params_sv,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=False
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best score:', clf_res.best_score_)
print('Best Params:', clf_res.best_params_)
# %%
# ### Tune hyperparameters for Decision Trees
params_dtree = {
"estimator__splitter":["best", "random"],
"estimator__min_samples_split":range(1, 20, 1)
}
clf = GridSearchCV(estimator=dtree,
param_grid=params_dtree,
scoring='f1_micro',
n_jobs=-1,
cv=N_FOLDS,
return_train_score=False
)
clf_res = clf.fit(x_train_w2v, y_train)
print('Best score:', clf_res.best_score_)
print('Best Params:', clf_res.best_params_)
# %% [markdown]
# ## Ensemble Methods
# %% [markdown]
# ### Stacking
estimators = [
('nb', GaussianNB()),
('svm', svm.LinearSVC())
]
sclf = OneVsRestClassifier(StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression())
)
metrics = cross_validate(
sclf,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
# %%
res = []
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
res.append(('Stacking', fold_index, key, score))
# %%
res_df = pandas.DataFrame.from_dict(res)
res_df.columns = ['algo', 'cv fold', 'metric', 'value']
cv_results_inc_ens = pandas.concat([cv_results_df, res_df])
# %% [markdown]
# ### Bagging
sclf = OneVsRestClassifier(BaggingClassifier(
base_estimator=LogisticRegression())
)
metrics = cross_validate(
sclf,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
# %%
res = []
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
res.append(('Bagging', fold_index, key, score))
# %%
res_df = pandas.DataFrame.from_dict(res)
res_df.columns = ['algo', 'cv fold', 'metric', 'value']
cv_results_inc_ens = pandas.concat([cv_results_inc_ens, res_df])
# %% [markdown]
# ### Boosting
from sklearn.ensemble import AdaBoostClassifier
sclf = OneVsRestClassifier(AdaBoostClassifier(
random_state=RANDOM_STATE)
)
metrics = cross_validate(
sclf,
x_train_w2v,
y_train,
cv=N_FOLDS,
scoring = scoring,
return_train_score=False,
n_jobs=-1
)
# %%
res = []
for key in metrics.keys():
for fold_index, score in enumerate(metrics[key]):
res.append(('AdaBoost', fold_index, key, score))
# %%
res_df = pandas.DataFrame.from_dict(res)
res_df.columns = ['algo', 'cv fold', 'metric', 'value']
cv_results_inc_ens = pandas.concat([cv_results_inc_ens, res_df])
# %%
#cv_results_inc_ens.to_csv('./data/cv-results-inc-ens.csv')
# %% [markdown]
# ### Plot the results including ensembling
for metric_name, metric in zip(['fit_time',
'test_precision',
'test_recall',
'test_f1',
'test_roc_auc'],
['Fit Time',
'Precision',
'Recall',
'F1 Score',
'ROC AUC']):
sns.lineplot(x='cv fold', y='value', hue='algo',
data=cv_results_inc_ens[cv_results_inc_ens.metric.eq(f'{metric_name}')])
plt.title(f'{metric} Algo Comparison', fontsize=12)
plt.xlabel('CV Fold', fontsize=12)
plt.ylabel(f'{metric}', fontsize=12)
plt.xticks([0, 1, 2, 3, 4])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# %% [markdown]
# ## LIME for model interpretation
# predict_fn = model_namelist
# i = 0
# for model in model_list:
# predict_fn[i] = lambda x: model.predict_proba(x).astype(float)
# i += 1
# %%
# LIME explains the averaged word2vec dimensions, so name one feature per dimension
feature_names = [f'w2v_dim_{i}' for i in range(W2V_FEATURE_SIZE)]
# %%
import lime.lime_text
class_names=['W','S','B','T']
explainer = lime.lime_tabular.LimeTabularExplainer(x_test_w2v.values,
feature_names=feature_names, mode='classification', class_names=class_names)
# %%
# explain one test instance with the logistic regression model
idx = 34
exp = explainer.explain_instance(x_test_w2v.values[idx], lreg.predict_proba, num_features=20, top_labels=4)
print('Document id: %d' % idx)
# print('Predicted class =', class_names[lreg.predict(numpy.argmax(y_test, axis=1)[idx])])#.reshape(1,-1)[0,0]])
# print('True class: %s' % class_names[y_test[idx]])
# %%
print ('\n'.join(map(str, exp.as_list(label=0))))
# %%
exp.show_in_notebook()
#%% ELI 5
import eli5
import sklearn.pipeline
import IPython
#for pipeline in pipelines:
print('Estimator: %s' % 'Logistic Regression')
#labels = pipeline['pipeline'].classes_.tolist()
#estimator = lreg
eli5.show_weights(estimator=lreg, top=10, target_names=['W', 'S', 'B', 'T'],
                  feature_names=[str(c) for c in x_train_w2v.columns])
#eli5.show_prediction(estimator = lreg, doc = x_test_w2v.values[34], target_names = ['W','S','B','T'])
#target_names = ['W','S','B','T'], vec = word2vec))
# %% [markdown]
# ## References - Code sample sources disclaimer:
# Code for this project is either directly from (with some modification),
# or inspired by, but not limited to the following sources:
# - Respective documentation and examples from each used API's doc/guide website
# - Kelly Epley Naive Bayes:
# https://towardsdatascience.com/naive-bayes-document-classification-in-python-e33ff50f937e
# - MLWhiz's excellent blogs about text classification and NLP:
# https://mlwhiz.com/blog/2018/12/17/text_classification/
# https://mlwhiz.com/blog/2019/01/17/deeplearning_nlp_preprocess/
# https://mlwhiz.com/blog/2019/02/08/deeplearning_nlp_conventional_methods/
# https://www.kaggle.com/mlwhiz/conventional-methods-for-quora-classification/
# - Christof Henkel preprocessing:
# https://www.kaggle.com/christofhenkel/how-to-preprocessing-when-using-embeddings
# - datanizing GmbH:
# https://medium.com/@datanizing/modern-text-mining-with-python-part-1-of-5-introduction-cleaning-and-linguistics-647f9ec85b6a
# - Datacamp wordcloud:
# https://www.datacamp.com/community/tutorials/wordcloud-python
# - Seaborn Pydata tutorials:
# https://seaborn.pydata.org/introduction.html#intro-plot-customization
# - Dipanjan S's tutorials:
# https://github.com/dipanjanS
# - Analytics Vidhya:
# https://www.analyticsvidhya.com/blog/2018/04/a-comprehensive-guide-to-understand-and-implement-text-classification-in-python/
# - Jason Brownlee's Feature Selection For Machine Learning in Python
# https://machinelearningmastery.com/feature-selection-machine-learning-python/
# - Susan Li's Multi-class text classification with Scikit-learn:
# https://towardsdatascience.com/multi-class-text-classification-with-scikit-learn-12f1e60e0a9f
# - Vadim Smolyakov Ensemble Learning to Improve Machine Learning Results:
# https://blog.statsbot.co/ensemble-learning-d1dcd548e936
# - Udacity course video on Youtube UD120:
# https://www.youtube.com/watch?v=GdsLRKjjKLw
# - Hyperparameter Tuning with Hyperopt
# https://towardsdatascience.com/automated-machine-learning-hyperparameter-tuning-in-python-dfda59b72f8a
# - Hyperparameter Tuning for Gaussian NB
# https://www.quora.com/Can-the-prior-in-a-naive-Bayes-be-considered-a-hyperparameter-and-tuned-for-better-accuracy
# - Hyperparameter Tuning for Decision Trees
# https://towardsdatascience.com/how-to-tune-a-decision-tree-f03721801680
# - Lime tutorial
# https://marcotcr.github.io/lime/tutorials/Lime%20-%20multiclass.html
# - ELI5
# https://towardsdatascience.com/3-ways-to-interpretate-your-nlp-model-to-management-and-customer-5428bc07ce15
|
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from copy import deepcopy
from UI.app import app
from UI.pages import (
data_upload,
problem_formulation,
train_model,
optimize,
navigator_2_fronts,
coloring_parallel_coords,
)
pages = {
"/upload": data_upload,
"/problem": problem_formulation,
"/train": train_model,
"/optimize": optimize,
"/navigate": navigator_2_fronts,
"/colour": coloring_parallel_coords,
}
o_nautilus_page_order = ["/upload", "/problem", "/train", "/optimize", "/navigate"]
parallel_coords_colouring_order = ["/upload", "/problem", "/colour"]
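# Illustrative note (values taken from the lists above): display_page below
# derives the Previous/Next targets by index arithmetic, e.g. for "/train" in
# the O-NAUTILUS flow:
#   i = o_nautilus_page_order.index("/train")                   # -> 2
#   o_nautilus_page_order[i - 1], o_nautilus_page_order[i + 1]  # -> ("/problem", "/optimize")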
layout = html.Div(
[
dcc.Location(id="url", refresh=False),
html.Div(id="page-content", children=[]),
html.Div(id="app_choice", children=None, hidden=True),
]
)
home_page = html.Div(
[
dbc.Row(
dbc.Col(
html.H1("Choose an application"), className="row justify-content-center"
)
),
dbc.Row(
dbc.Col(
dbc.ButtonGroup(
[
dbc.Button(
"O-NAUTILUS",
id="onautilus_button",
n_clicks_timestamp=-1,
href="/upload#O-NAUTILUS",
className="mr-1 mt-1",
color="primary",
),
dbc.Button(
"Coloured Parallel Coordinates",
id="cpc_button",
n_clicks_timestamp=-1,
href="/upload#CPC",
className="mr-1 mt-1",
color="primary",
),
]
),
className="row justify-content-center",
)
),
]
)
def navbuttons(prev, home, next_):
buttons = [
dbc.Row(
dbc.Col(
dbc.ButtonGroup(
[
dbc.Button(
"Previous",
id="prev_button",
n_clicks_timestamp=-1,
href=prev,
className="mt-3",
color="primary",
),
dbc.Button(
"Home",
id="home_button",
n_clicks_timestamp=-1,
href=home,
className="ml-1 mr-1 mt-3",
color="primary",
),
dbc.Button(
"Next",
id="next_button",
n_clicks_timestamp=-1,
href=next_,
className="mt-3",
color="primary",
),
]
),
className="row justify-content-center",
)
)
]
return buttons
app.layout = layout
@app.callback(
Output("page-content", "children"),
[Input("url", "pathname")],
[State("url", "hash")],
)
def display_page(pathname, app_choice):
if pathname == "/":
return home_page
elif pathname in pages:
layout = pages[pathname].layout()
prev = "/"
next_ = "/error"
if app_choice == "#O-NAUTILUS":
current_page_index = o_nautilus_page_order.index(pathname)
if current_page_index != 0:
prev = o_nautilus_page_order[current_page_index - 1]
if current_page_index != len(o_nautilus_page_order) - 1:
next_ = o_nautilus_page_order[current_page_index + 1]
elif app_choice == "#CPC":
current_page_index = parallel_coords_colouring_order.index(pathname)
if current_page_index != 0:
prev = parallel_coords_colouring_order[current_page_index - 1]
if current_page_index != len(parallel_coords_colouring_order) - 1:
next_ = parallel_coords_colouring_order[current_page_index + 1]
else:
return "404"
layout.children.extend(
deepcopy(
navbuttons(prev + app_choice, "/" + app_choice, next_ + app_choice)
)
)
return layout
else:
return "404"
if __name__ == "__main__":
app.run_server(debug=False)
|
MESSAGE_TEXT='**Official Store:** https://shop.safemoon.net'
def store(update, context):
    context.bot.send_message(chat_id=update.effective_chat.id, text=MESSAGE_TEXT,
                             parse_mode='markdown', disable_web_page_preview=True)
|
import random as r
from termcolor import cprint
CHARSLIST = list("#@&\"'§!°-_*$`%=+/:;.,?^|<>{}[]()")
ACTIONS = {
'=':'Assign a value',
':':'Specify a type',
'<':'Start cast to a type',
'>':'End cast to a type',
'[':'Start a list',
']':'End a list',
'ö':'Start a dict',
'ë':'End a dict',
'°':'End a dict',
'$':'Concatenation',
'`':'Interpolation start',
'ô':'Interpolation end',
'+':'Addition',
'-':'Substraction',
'*':'Multiplication',
'^':'Power',
'/':'Division',
'%':'Modulo',
'@':'Start index localisation',
'ê':'End index localisation',
'#':'Add a comment',
'¨':'Open a multiline comment',
'£':'Close a multiline comment',
'ç':'Check equality',
'à':'Check strong equality',
'!':'Logical NOT',
'&':'Logical AND',
'|':'Logical OR',
'.':'Access an object property',
'é':'Access a class property',
'?':'Start a tertiary condition',
'{':'Start a scope',
'}':'End a scope',
'(':'Start a function call',
')':'End a function call',
'"':'Indicate a string',
';':'End a line',
',':'Common separator'
}
SAMPLE = """
# DO NOT use that
unsecure_mode: bool = 0;
# Get userinput
plz = "please";
usrin = input("Your name `plzô")<str>;
if !empty(usrin) & xss_safe(usrin) | unsecure_mode {
col1: hexColor = "0x" $ input();
col2: hexColor = "0x" $ input();
info = ö
"IP":get_ip(),
"date":now()<str>,
"theme":[col1, col2]
ë
header_color.set(info@themeê@0ê);
display(WebsiteéResourceséLogo(col2));
} else {
¨
We should really handle that shit though
this is not my job anymore
Show a random calculation instead
£
display(8 + 9 / 4 * 5 ^ 9 % 8);
}
"""
MIN_LEN = 1
MAX_LEN = 3
MAX_RPT = 3
def gen_operator(length=None, blacklist=None, unallowed=()):
    charslist = [e for e in CHARSLIST if e not in (blacklist or [])]
result = str()
length = r.randint(MIN_LEN, MAX_LEN) if length is None else int(length)
for i in range(length):
seled = r.choice(charslist)
while seled in unallowed:
cprint(f'Tried: {seled}, already taken','red')
seled = r.choice(charslist)
        # TODO: add a ~75% chance of reusing a previous char from the current buffer, to make results less chaotic
result += seled
return result
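# Hedged example of calling the generator above (output is random, so the exact
# characters will differ between runs):
#   gen_operator(2, blacklist=['#'], unallowed=['=', ':'])  # e.g. -> '${'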
maxlen = max([len(e) for e in ACTIONS.values()])
repl_map = dict()
mul_length_codes = list("¨£#&|$")
forced_codes = list('')
for acode, a in ACTIONS.items():
if acode in forced_codes:
repl_map[acode] = acode
continue
l = None if acode in mul_length_codes else 1
op = gen_operator(l, blacklist=forced_codes, unallowed=list(repl_map.values()))
sp = ' ' * (1 + maxlen - len(a))
print(f"{a}{sp}{op}")
repl_map[acode] = op
print("=" * 30 + "This is what a code sample would like:")
sample = str()
for c in SAMPLE:
sample += repl_map.get(c, c)
print(sample) |
import time
start_time = time.time()
import sys
import json, yaml
import pandas as pd
import numpy as np
import redacted_logging as rlog
logger = rlog.get_logger(__name__)
#read input file
try:
with open(r'/inputVolume/security_input.yaml') as file:
inputYAML = yaml.load(file, Loader=yaml.FullLoader)
logger.info("Reading request.yaml file...")
except FileNotFoundError:
logger.error("Cannot find security_input.yaml file!")
else:
parties = inputYAML['parties']
# Read all encrypted datasets into a list #
try:
smallest = 0
dataset_list = []
for p in parties:
df_eachParty = pd.read_csv('/data/encrypted_%s.csv' %(p)).set_index('encString')
dataset_list.append(df_eachParty)
except:
logger.error("Verification and decrption failed! No files are able to be executed. Please check your keys.")
else:
for i in range(0, len(parties)):
logger.info('{dataparty} has {datasize} rows'.format(dataparty=parties[i], datasize=len(dataset_list[i])))
# Order the size of datasets from small to large #
sizes = []
for dataset in dataset_list:
sizes.append(len(dataset))
order = np.argsort(sizes)
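        # For example (hypothetical sizes): sizes = [500, 120, 300] gives
        # order = array([1, 2, 0]), i.e. the smallest dataset is visited first.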
# Find match records #
for item in range(0, len(order)):
multi_match = []
multi_match_number = []
exact_match = []
no_match = []
if item == 0:
combined_df = dataset_list[order[item]]
else:
for i in combined_df.index:
try:
pair = dataset_list[order[item]].loc[i]
if type(pair) == pd.DataFrame:
multi_match.append(i)
multi_match_number.append(len(pair))
elif type(pair) == pd.Series:
exact_match.append(i)
except:
no_match.append(i)
# Report matches #
logger.debug('Matching result - Exact {exNum}'.format(exNum=len(exact_match)))
logger.debug('Matching result - Multi {mulNum}'.format(mulNum=len(multi_match)))
logger.debug('Matching result - None {noNum}'.format(noNum=len(no_match)))
logger.debug("Multi-matching array: {array} ".format(array=str(multi_match_number)))
# Link and combine actual data with person identifiers #
combined_df = pd.concat([combined_df.loc[exact_match], dataset_list[order[item]].loc[exact_match]], axis=1, join='inner')
if len(exact_match) > 0:
# Restrict analysis if exact match is less than 100 #
if len(exact_match) < 100:
logger.warning("The number of exact-matched instances is less than 100!!")
sys.exit("Due to priavcy concerns, execution is interrupted here. Please provide datasets which have more common instances!")
else:
cmb_col = list(combined_df.columns)
with open('/output/CombinedFeatures.txt', 'w') as f:
for item in cmb_col:
f.write("%s\n" % item)
# Save file #
combined_df.to_csv('/data/act_data.csv')
logger.debug('Features in combined dataset are saved locally')
logger.info("Matching and linking took {runtime:.4f}s to run.".format(runtime=(time.time() - start_time)))
else:
logger.error('No records have been exactly matched so that no combined dataset is generated!!') |